From a4e18de767e38c9244387c201ef4d28129047997 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:07:23 -0500 Subject: [PATCH 0001/1789] New translations global.json (Spanish) --- website/src/pages/es/global.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/es/global.json b/website/src/pages/es/global.json index b9c8db5fa5fa..d6b685d94ea3 100644 --- a/website/src/pages/es/global.json +++ b/website/src/pages/es/global.json @@ -1,13 +1,13 @@ { "navigation": { - "title": "Main navigation", + "title": "Navegación principal", "show": "Show navigation", "hide": "Hide navigation", "subgraphs": "Subgrafos", "substreams": "Corrientes secundarias", "sps": "Substreams-Powered Subgraphs", "indexing": "Indexing", - "resources": "Resources", + "resources": "Recursos", "archived": "Archived" }, "page": { From 6f7a23865de6c994ccf54c97f131485c70dc2355 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:07:29 -0500 Subject: [PATCH 0002/1789] New translations global.json (Russian) --- website/src/pages/ru/global.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ru/global.json b/website/src/pages/ru/global.json index 0b02b6ff1575..d1b17d0a8349 100644 --- a/website/src/pages/ru/global.json +++ b/website/src/pages/ru/global.json @@ -7,7 +7,7 @@ "substreams": "Substreams", "sps": "Substreams-Powered Subgraphs", "indexing": "Indexing", - "resources": "Resources", + "resources": "Ресурсы", "archived": "Archived" }, "page": { From 8ee17724145fd6286cf4762288fb9f82ee8ed89f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:07:34 -0500 Subject: [PATCH 0003/1789] New translations global.json (Swahili) --- website/src/pages/sw/global.json | 35 ++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 website/src/pages/sw/global.json diff --git a/website/src/pages/sw/global.json b/website/src/pages/sw/global.json new file mode 100644 index 000000000000..f0bd80d9715b --- /dev/null +++ b/website/src/pages/sw/global.json @@ -0,0 +1,35 @@ +{ + "navigation": { + "title": "Main navigation", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "Subgraphs", + "substreams": "Substreams", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! 
This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" + } +} From 4d8be02af1886b8f5680e848978d05f7256782d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:07:36 -0500 Subject: [PATCH 0004/1789] New translations index.json (Spanish) --- website/src/pages/es/index.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/es/index.json b/website/src/pages/es/index.json index c980229ff3d5..2b5f75c9858b 100644 --- a/website/src/pages/es/index.json +++ b/website/src/pages/es/index.json @@ -1,10 +1,10 @@ { - "title": "Home", + "title": "Inicio", "hero": { "title": "The Graph Docs", "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", "cta1": "How The Graph works", - "cta2": "Build your first subgraph" + "cta2": "Crea tu primer subgrafo" }, "products": { "title": "The Graph’s Products", @@ -83,7 +83,7 @@ "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." }, "whatIsDelegating": { - "title": "What is Delegating?", + "title": "¿Qué es la delegación?", "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." }, "howToIndexSolana": { From b4b5215a4995a7149008e8abdbb60f2850be4cf4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:07:42 -0500 Subject: [PATCH 0005/1789] New translations index.json (Russian) --- website/src/pages/ru/index.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ru/index.json b/website/src/pages/ru/index.json index 11e1eef7f22c..f159be696235 100644 --- a/website/src/pages/ru/index.json +++ b/website/src/pages/ru/index.json @@ -4,7 +4,7 @@ "title": "The Graph Docs", "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", "cta1": "How The Graph works", - "cta2": "Build your first subgraph" + "cta2": "Создайте свой первый субграф" }, "products": { "title": "The Graph’s Products", @@ -83,7 +83,7 @@ "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." }, "whatIsDelegating": { - "title": "What is Delegating?", + "title": "Что такое Делегирование?", "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." 
}, "howToIndexSolana": { From 3a5567945f6d6a897bf07e2b63424e52c9609407 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:07:47 -0500 Subject: [PATCH 0006/1789] New translations index.json (Swahili) --- website/src/pages/sw/index.json | 99 +++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 website/src/pages/sw/index.json diff --git a/website/src/pages/sw/index.json b/website/src/pages/sw/index.json new file mode 100644 index 000000000000..787097b1fbc4 --- /dev/null +++ b/website/src/pages/sw/index.json @@ -0,0 +1,99 @@ +{ + "title": "Home", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Build your first subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "Subgraphs", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" + }, + "substreams": { + "title": "Substreams", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" + }, + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" + }, + "graphNode": { + "title": "Graph Node", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" + }, + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" + } + }, + "supportedNetworks": { + "title": "Supported Networks", + "description": { + "base": "The Graph supports {0}. To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" + } + }, + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "Querying Best Practices", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." + } + }, + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." 
+ }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." + } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" + } +} From 064f9269290afc4148eb40748514d6d0b8ccc4a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:07:59 -0500 Subject: [PATCH 0007/1789] New translations docsearch.json (Swahili) --- website/src/pages/sw/docsearch.json | 42 +++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 website/src/pages/sw/docsearch.json diff --git a/website/src/pages/sw/docsearch.json b/website/src/pages/sw/docsearch.json new file mode 100644 index 000000000000..8cfff967936d --- /dev/null +++ b/website/src/pages/sw/docsearch.json @@ -0,0 +1,42 @@ +{ + "button": { + "buttonText": "Search", + "buttonAriaLabel": "Search" + }, + "modal": { + "searchBox": { + "resetButtonTitle": "Clear the query", + "resetButtonAriaLabel": "Clear the query", + "cancelButtonText": "Cancel", + "cancelButtonAriaLabel": "Cancel" + }, + "startScreen": { + "recentSearchesTitle": "Recent", + "noRecentSearchesText": "No recent searches", + "saveRecentSearchButtonTitle": "Save this search", + "removeRecentSearchButtonTitle": "Remove this search from history", + "favoriteSearchesTitle": "Favorite", + "removeFavoriteSearchButtonTitle": "Remove this search from favorites" + }, + "errorScreen": { + "titleText": "Unable to fetch results", + "helpText": "You might want to check your network connection." + }, + "footer": { + "selectText": "to select", + "selectKeyAriaLabel": "Enter key", + "navigateText": "to navigate", + "navigateUpKeyAriaLabel": "Arrow up", + "navigateDownKeyAriaLabel": "Arrow down", + "closeText": "to close", + "closeKeyAriaLabel": "Escape key", + "searchByText": "Search by" + }, + "noResultsScreen": { + "noResultsText": "No results for", + "suggestedQueryText": "Try searching for", + "reportMissingResultsText": "Believe this query should return results?", + "reportMissingResultsLinkText": "Let us know." + } + } +} From 7880e8584cbe123df9fdbdc5d338c18526bbc420 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:00 -0500 Subject: [PATCH 0008/1789] New translations about.mdx (Romanian) --- website/src/pages/ro/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/ro/about.mdx b/website/src/pages/ro/about.mdx index 25c810045af0..0c18db196d1b 100644 --- a/website/src/pages/ro/about.mdx +++ b/website/src/pages/ro/about.mdx @@ -30,25 +30,25 @@ Blockchain properties, such as finality, chain reorganizations, and uncled block ## The Graph Provides a Solution -The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. 
+The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. Today, there is a decentralized protocol that is backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node) that enables this process. ### How The Graph Functions -Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Specifics -- The Graph uses subgraph descriptions, which are known as the subgraph manifest inside the subgraph. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- The subgraph description outlines the smart contracts of interest for a subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- When creating a subgraph, you need to write a subgraph manifest. +- When creating a Subgraph, you need to write a Subgraph manifest. -- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that subgraph. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -The diagram below provides more detailed information about the flow of data after a subgraph manifest has been deployed with Ethereum transactions. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. ![A graphic explaining how The Graph uses Graph Node to serve queries to data consumers](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ The flow follows these steps: 1. A dapp adds data to Ethereum through a transaction on a smart contract. 2. The smart contract emits one or more events while processing the transaction. -3. Graph Node continually scans Ethereum for new blocks and the data for your subgraph they may contain. -4. Graph Node finds Ethereum events for your subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. 
The dapp queries the Graph Node for data indexed from the blockchain, using the node's [GraphQL endpoint](https://graphql.org/learn/). The Graph Node in turn translates the GraphQL queries into queries for its underlying data store in order to fetch this data, making use of the store's indexing capabilities. The dapp displays this data in a rich UI for end-users, which they use to issue new transactions on Ethereum. The cycle repeats. ## Next Steps -The following sections provide a more in-depth look at subgraphs, their deployment and data querying. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Before you write your own subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed subgraphs. Each subgraph's page includes a GraphQL playground, allowing you to query its data. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. From a626750a054b33d7fd427b9d8e35fd685f9b1bb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:03 -0500 Subject: [PATCH 0009/1789] New translations about.mdx (French) --- website/src/pages/fr/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/fr/about.mdx b/website/src/pages/fr/about.mdx index 0740a57e71c5..1cce1a4218ea 100644 --- a/website/src/pages/fr/about.mdx +++ b/website/src/pages/fr/about.mdx @@ -30,25 +30,25 @@ Les spécificités de la blockchain, comme la finalité des transactions, les r ## The Graph apporte une solution -The Graph répond à ce défi grâce à un protocole décentralisé qui indexe les données de la blockchain et permet de les interroger de manière efficace et performantes. Ces API (appelées "subgraphs" indexés) peuvent ensuite être interrogées via une API standard GraphQL. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. Aujourd'hui, il existe un protocole décentralisé soutenu par l'implémentation open source de [Graph Node](https://github.com/graphprotocol/graph-node) qui permet ce processus. ### Comment fonctionne The Graph⁠ -Indexer les données de la blockchain est une tâche complexe, mais The Graph la simplifie. Il apprend à indexer les données d'Ethereum en utilisant des subgraphs. Les subgraphs sont des API personnalisées construites sur les données de la blockchain qui extraient, traitent et stockent ces données pour qu'elles puissent être interrogées facilement via GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Spécificités⁠ -- The Graph utilise des descriptions de subgraph, qui sont connues sous le nom de "manifeste de subgraph" à l'intérieur du subgraph. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. 
-- Ce manifeste définit les contrats intelligents intéressants pour un subgraph, les événements spécifiques à surveiller au sein de ces contrats, et la manière de mapper les données de ces événements aux données que The Graph stockera dans sa base de données. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- Lors de la création d'un subgraph, vous devez rédiger ce manifeste. +- When creating a Subgraph, you need to write a Subgraph manifest. -- Une fois le `manifeste du subgraph` écrit, vous pouvez utiliser l'outil en ligne de commande Graph CLI pour stocker la définition en IPFS et demander à un Indexeur de commencer à indexer les données pour ce subgraph. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -Le schéma ci-dessous illustre plus en détail le flux de données après le déploiement d'un manifeste de subgraph avec des transactions Ethereum. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. ![Un graphique expliquant comment The Graph utilise Graph Node pour répondre aux requêtes des consommateurs de données](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ La description des étapes du flux : 1. Une dapp ajoute des données à Ethereum via une transaction sur un contrat intelligent. 2. Le contrat intelligent va alors produire un ou plusieurs événements lors du traitement de la transaction. -3. Parallèlement, Le nœud de The Graph scanne continuellement Ethereum à la recherche de nouveaux blocs et de nouvelles données intéressantes pour votre subgraph. -4. The Graph Node trouve alors les événements Ethereum d'intérêt pour votre subgraph dans ces blocs et vient exécuter les corrélations correspondantes que vous avez fournies. Le gestionnaire de corrélation se définit comme un module WASM qui crée ou met à jour les entités de données que le nœud de The Graph stocke en réponse aux événements Ethereum. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. Le dapp interroge le Graph Node pour des données indexées à partir de la blockchain, à l'aide du [point de terminaison GraphQL](https://graphql.org/learn/) du noeud. À son tour, le Graph Node traduit les requêtes GraphQL en requêtes pour sa base de données sous-jacente afin de récupérer ces données, en exploitant les capacités d'indexation du magasin. Le dapp affiche ces données dans une interface utilisateur riche pour les utilisateurs finaux, qui s'en servent pour émettre de nouvelles transactions sur Ethereum. Le cycle se répète. ## Les Étapes suivantes -Les sections suivantes proposent une exploration plus approfondie des subgraphs, de leur déploiement et de la manière d'interroger les données. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Avant de créer votre propre subgraph, il est conseillé de visiter Graph Explorer et d'examiner certains des subgraphs déjà déployés. 
Chaque page de subgraph comprend un playground (un espace de test) GraphQL, vous permettant d'interroger ses données. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. From 37e987b82156a5e00b0aacd0fb752cdac19f313a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:05 -0500 Subject: [PATCH 0010/1789] New translations about.mdx (Spanish) --- website/src/pages/es/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/es/about.mdx b/website/src/pages/es/about.mdx index 22dafa9785ad..ffa133b4e0b7 100644 --- a/website/src/pages/es/about.mdx +++ b/website/src/pages/es/about.mdx @@ -30,25 +30,25 @@ Blockchain properties, such as finality, chain reorganizations, and uncled block ## The Graph Provides a Solution -The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. Today, there is a decentralized protocol that is backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node) that enables this process. ### How The Graph Functions -Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Specifics -- The Graph uses subgraph descriptions, which are known as the subgraph manifest inside the subgraph. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- The subgraph description outlines the smart contracts of interest for a subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- When creating a subgraph, you need to write a subgraph manifest. +- When creating a Subgraph, you need to write a Subgraph manifest. -- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that subgraph. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. 
-The diagram below provides more detailed information about the flow of data after a subgraph manifest has been deployed with Ethereum transactions. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. ![Un gráfico explicando como The Graph usa Graph Node para servir consultas a los consumidores de datos](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ El flujo sigue estos pasos: 1. Una aplicación descentralizada (dapp) añade datos a Ethereum a través de una transacción en un contrato inteligente. 2. El contrato inteligente emite uno o más eventos mientras procesa la transacción. -3. Graph Node escanea continuamente la red de Ethereum en busca de nuevos bloques y los datos de tu subgrafo que puedan contener. -4. Graph Node encuentra los eventos de la red Ethereum, a fin de proveerlos en tu subgrafo mediante estos bloques y ejecuta los mapping handlers que proporcionaste. El mapeo (mapping) es un módulo WASM que crea o actualiza las entidades de datos que Graph Node almacena en respuesta a los eventos de Ethereum. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. La dapp consulta a través de Graph Node los datos indexados de la blockchain, utilizando el [GraphQL endpoint](https://graphql.org/learn/) del nodo. El Nodo de The Graph, a su vez, traduce las consultas GraphQL en consultas para su almacenamiento de datos subyacentes con el fin de obtener estos datos, haciendo uso de las capacidades de indexación que ofrece el almacenamiento. La dapp muestra estos datos en una interfaz muy completa para el usuario, a fin de que los end users que usan este subgrafo puedan emitir nuevas transacciones en Ethereum. El ciclo se repite. ## Próximos puntos -The following sections provide a more in-depth look at subgraphs, their deployment and data querying. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Before you write your own subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed subgraphs. Each subgraph's page includes a GraphQL playground, allowing you to query its data. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. 
From 581e4cda0319e489bf63f6bbaaa0c05dd1e3bdc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:06 -0500 Subject: [PATCH 0011/1789] New translations about.mdx (Arabic) --- website/src/pages/ar/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/ar/about.mdx b/website/src/pages/ar/about.mdx index 8005f34aef5f..93dbeb51f658 100644 --- a/website/src/pages/ar/about.mdx +++ b/website/src/pages/ar/about.mdx @@ -30,25 +30,25 @@ Blockchain properties, such as finality, chain reorganizations, and uncled block ## The Graph Provides a Solution -The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. Today, there is a decentralized protocol that is backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node) that enables this process. ### How The Graph Functions -Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Specifics -- The Graph uses subgraph descriptions, which are known as the subgraph manifest inside the subgraph. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- The subgraph description outlines the smart contracts of interest for a subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- When creating a subgraph, you need to write a subgraph manifest. +- When creating a Subgraph, you need to write a Subgraph manifest. -- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that subgraph. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -The diagram below provides more detailed information about the flow of data after a subgraph manifest has been deployed with Ethereum transactions. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. 
![A graphic explaining how The Graph uses Graph Node to serve queries to data consumers](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ The diagram below provides more detailed information about the flow of data afte 1. A dapp adds data to Ethereum through a transaction on a smart contract. 2. العقد الذكي يصدر حدثا واحدا أو أكثر أثناء معالجة الإجراء. -3. يقوم الـ Graph Node بمسح الـ Ethereum باستمرار بحثا عن الكتل الجديدة وبيانات الـ subgraph الخاص بك. -4. يعثر الـ Graph Node على أحداث الـ Ethereum لـ subgraph الخاص بك في هذه الكتل ويقوم بتشغيل mapping handlers التي قدمتها. الـ mapping عبارة عن وحدة WASM والتي تقوم بإنشاء أو تحديث البيانات التي يخزنها Graph Node استجابة لأحداث الـ Ethereum. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. The dapp queries the Graph Node for data indexed from the blockchain, using the node's [GraphQL endpoint](https://graphql.org/learn/). The Graph Node in turn translates the GraphQL queries into queries for its underlying data store in order to fetch this data, making use of the store's indexing capabilities. The dapp displays this data in a rich UI for end-users, which they use to issue new transactions on Ethereum. The cycle repeats. ## الخطوات التالية -The following sections provide a more in-depth look at subgraphs, their deployment and data querying. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Before you write your own subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed subgraphs. Each subgraph's page includes a GraphQL playground, allowing you to query its data. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. From 39ca4247729cb723fc89400657093d6ae5c45693 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:07 -0500 Subject: [PATCH 0012/1789] New translations about.mdx (Czech) --- website/src/pages/cs/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/cs/about.mdx b/website/src/pages/cs/about.mdx index 256519660a73..1f43c663437f 100644 --- a/website/src/pages/cs/about.mdx +++ b/website/src/pages/cs/about.mdx @@ -30,25 +30,25 @@ Blockchain properties, such as finality, chain reorganizations, and uncled block ## The Graph Provides a Solution -The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. Today, there is a decentralized protocol that is backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node) that enables this process. 
### How The Graph Functions -Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Specifics -- The Graph uses subgraph descriptions, which are known as the subgraph manifest inside the subgraph. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- The subgraph description outlines the smart contracts of interest for a subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- When creating a subgraph, you need to write a subgraph manifest. +- When creating a Subgraph, you need to write a Subgraph manifest. -- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that subgraph. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -The diagram below provides more detailed information about the flow of data after a subgraph manifest has been deployed with Ethereum transactions. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. ![Grafu vysvětlující, jak Graf používá Uzel grafu k doručování dotazů konzumentům dat](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ Průběh se řídí těmito kroky: 1. Dapp přidává data do Ethereum prostřednictvím transakce na chytrém kontraktu. 2. Chytrý smlouva vysílá při zpracování transakce jednu nebo více událostí. -3. Uzel grafu neustále vyhledává nové bloky Ethereum a data pro váš podgraf, která mohou obsahovat. -4. Uzel grafu v těchto blocích vyhledá události Etherea pro váš podgraf a spustí vámi zadané mapovací obsluhy. Mapování je modul WASM, který vytváří nebo aktualizuje datové entity, které Uzel grafu ukládá v reakci na události Ethereum. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. Aplikace dapp se dotazuje grafického uzlu na data indexovaná z blockchainu pomocí [GraphQL endpoint](https://graphql.org/learn/). Uzel Grafu zase překládá dotazy GraphQL na dotazy pro své podkladové datové úložiště, aby tato data načetl, přičemž využívá indexovací schopnosti úložiště. Dapp tato data zobrazuje v bohatém UI pro koncové uživatele, kteří je používají k vydávání nových transakcí na platformě Ethereum. Cyklus se opakuje. 
## Další kroky -The following sections provide a more in-depth look at subgraphs, their deployment and data querying. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Before you write your own subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed subgraphs. Each subgraph's page includes a GraphQL playground, allowing you to query its data. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. From 40568a10ff525167792b6cfa6d6f4932e8dea7ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:08 -0500 Subject: [PATCH 0013/1789] New translations about.mdx (German) --- website/src/pages/de/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/de/about.mdx b/website/src/pages/de/about.mdx index 61dbccdd5c84..3479db9413b0 100644 --- a/website/src/pages/de/about.mdx +++ b/website/src/pages/de/about.mdx @@ -30,25 +30,25 @@ Blockchain-Eigenschaften wie Endgültigkeit, Umstrukturierung der Kette und nich ## The Graph bietet eine Lösung -The Graph löst diese Herausforderung mit einem dezentralen Protokoll, das Blockchain-Daten indiziert und eine effiziente und leistungsstarke Abfrage ermöglicht. Diese APIs (indizierte „Subgraphen“) können dann mit einer Standard-GraphQL-API abgefragt werden. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. Heute gibt es ein dezentralisiertes Protokoll, das durch die Open-Source-Implementierung von [Graph Node](https://github.com/graphprotocol/graph-node) unterstützt wird und diesen Prozess ermöglicht. ### Die Funktionsweise von The Graph -Die Indizierung von Blockchain-Daten ist sehr schwierig, aber The Graph macht es einfach. The Graph lernt, wie man Ethereum-Daten mit Hilfe von Subgraphen indiziert. Subgraphs sind benutzerdefinierte APIs, die auf Blockchain-Daten aufgebaut sind. Sie extrahieren Daten aus einer Blockchain, verarbeiten sie und speichern sie so, dass sie nahtlos über GraphQL abgefragt werden können. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Besonderheiten -- The Graph verwendet Subgraph-Beschreibungen, die als Subgraph Manifest innerhalb des Subgraphen bekannt sind. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- Die Beschreibung des Subgraphs beschreibt die Smart Contracts, die für einen Subgraph von Interesse sind, die Ereignisse innerhalb dieser Verträge, auf die man sich konzentrieren sollte, und wie man die Ereignisdaten den Daten zuordnet, die The Graph in seiner Datenbank speichern wird. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. 
-- Wenn Sie einen Subgraphen erstellen, müssen Sie ein Subgraph Manifest schreiben. +- When creating a Subgraph, you need to write a Subgraph manifest. -- Nachdem Sie das `Subgraph Manifest` geschrieben haben, können Sie das Graph CLI verwenden, um die Definition im IPFS zu speichern und einen Indexer anzuweisen, mit der Indizierung der Daten für diesen Subgraphen zu beginnen. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -Das nachstehende Diagramm enthält detailliertere Informationen über den Datenfluss, nachdem ein Subgraph Manifest mit Ethereum-Transaktionen bereitgestellt worden ist. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. ![Eine graphische Darstellung, die erklärt, wie The Graph Graph Node verwendet, um Abfragen an Datenkonsumenten zu stellen](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ Der Ablauf ist wie folgt: 1. Eine Dapp fügt Ethereum durch eine Transaktion auf einem Smart Contract Daten hinzu. 2. Der Smart Contract gibt während der Verarbeitung der Transaktion ein oder mehrere Ereignisse aus. -3. Graph Node scannt Ethereum kontinuierlich nach neuen Blöcken und den darin enthaltenen Daten für Ihren Subgraphen. -4. Graph Node findet Ethereum-Ereignisse für Ihren Subgraphen in diesen Blöcken und führt die von Ihnen bereitgestellten Mapping-Handler aus. Das Mapping ist ein WASM-Modul, das die Dateneinheiten erstellt oder aktualisiert, die Graph Node als Reaktion auf Ethereum-Ereignisse speichert. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. Die Dapp fragt den Graph Node über den [GraphQL-Endpunkt](https://graphql.org/learn/) des Knotens nach Daten ab, die von der Blockchain indiziert wurden. Der Graph Node wiederum übersetzt die GraphQL-Abfragen in Abfragen für seinen zugrundeliegenden Datenspeicher, um diese Daten abzurufen, wobei er die Indexierungsfunktionen des Speichers nutzt. Die Dapp zeigt diese Daten in einer reichhaltigen Benutzeroberfläche für die Endnutzer an, mit der diese dann neue Transaktionen auf Ethereum durchführen können. Der Zyklus wiederholt sich. ## Nächste Schritte -In den folgenden Abschnitten werden die Subgraphen, ihr Einsatz und die Datenabfrage eingehender behandelt. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Bevor Sie Ihren eigenen Subgraphen schreiben, sollten Sie den [Graph Explorer](https://thegraph.com/explorer) erkunden und sich einige der bereits vorhandenen Subgraphen ansehen. Die Seite jedes Subgraphen enthält eine GraphQL- Playground, mit der Sie seine Daten abfragen können. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. 
From ddaa710a69bf0c231ab75235ca6616e6295c23ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:09 -0500 Subject: [PATCH 0014/1789] New translations about.mdx (Italian) --- website/src/pages/it/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/it/about.mdx b/website/src/pages/it/about.mdx index 3060784eac83..62f0bf4d3c61 100644 --- a/website/src/pages/it/about.mdx +++ b/website/src/pages/it/about.mdx @@ -30,25 +30,25 @@ Blockchain properties, such as finality, chain reorganizations, and uncled block ## The Graph Provides a Solution -The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. Today, there is a decentralized protocol that is backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node) that enables this process. ### How The Graph Functions -Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Specifics -- The Graph uses subgraph descriptions, which are known as the subgraph manifest inside the subgraph. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- The subgraph description outlines the smart contracts of interest for a subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- When creating a subgraph, you need to write a subgraph manifest. +- When creating a Subgraph, you need to write a Subgraph manifest. -- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that subgraph. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -The diagram below provides more detailed information about the flow of data after a subgraph manifest has been deployed with Ethereum transactions. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. ![Un grafico che spiega come The Graph utilizza Graph Node per servire le query ai consumatori di dati](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ Il flusso segue questi passi: 1. 
Una dapp aggiunge dati a Ethereum attraverso una transazione su uno smart contract. 2. Lo smart contract emette uno o più eventi durante l'elaborazione della transazione. -3. Graph Node scansiona continuamente Ethereum alla ricerca di nuovi blocchi e dei dati del vostro subgraph che possono contenere. -4. Graph Node trova gli eventi Ethereum per il vostro subgraph in questi blocchi ed esegue i gestori di mappatura che avete fornito. La mappatura è un modulo WASM che crea o aggiorna le entità di dati che Graph Node memorizza in risposta agli eventi Ethereum. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. La dapp effettua query del Graph Node per ottenere dati indicizzati dalla blockchain, utilizzando il [ GraphQL endpoint del nodo](https://graphql.org/learn/). Il Graph Node a sua volta traduce le query GraphQL in query per il suo archivio dati sottostante, al fine di recuperare questi dati, sfruttando le capacità di indicizzazione dell'archivio. La dapp visualizza questi dati in una ricca interfaccia utente per gli utenti finali, che li utilizzano per emettere nuove transazioni su Ethereum. Il ciclo si ripete. ## I prossimi passi -The following sections provide a more in-depth look at subgraphs, their deployment and data querying. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Before you write your own subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed subgraphs. Each subgraph's page includes a GraphQL playground, allowing you to query its data. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. From e8bc0535e5b4bffda23998d3ec27586ddcae7891 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:10 -0500 Subject: [PATCH 0015/1789] New translations about.mdx (Japanese) --- website/src/pages/ja/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/ja/about.mdx b/website/src/pages/ja/about.mdx index c867800369a3..b4462cd3c1c8 100644 --- a/website/src/pages/ja/about.mdx +++ b/website/src/pages/ja/about.mdx @@ -30,25 +30,25 @@ Blockchain properties, such as finality, chain reorganizations, and uncled block ## The Graph Provides a Solution -The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. Today, there is a decentralized protocol that is backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node) that enables this process. ### How The Graph Functions -Indexing blockchain data is very difficult, but The Graph makes it easy. 
The Graph learns how to index Ethereum data by using subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Specifics -- The Graph uses subgraph descriptions, which are known as the subgraph manifest inside the subgraph. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- The subgraph description outlines the smart contracts of interest for a subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- When creating a subgraph, you need to write a subgraph manifest. +- When creating a Subgraph, you need to write a Subgraph manifest. -- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that subgraph. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -The diagram below provides more detailed information about the flow of data after a subgraph manifest has been deployed with Ethereum transactions. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. ![グラフがグラフ ノードを使用してデータ コンシューマーにクエリを提供する方法を説明する図](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ The diagram below provides more detailed information about the flow of data afte 1. Dapp は、スマート コントラクトのトランザクションを通じて Ethereum にデータを追加します。 2. スマートコントラクトは、トランザクションの処理中に 1 つまたは複数のイベントを発行します。 -3. Graph Node は、Ethereum の新しいブロックと、それに含まれる自分のサブグラフのデータを継続的にスキャンします。 -4. Graph Node は、これらのブロックの中からあなたのサブグラフの Ethereum イベントを見つけ出し、あなたが提供したマッピングハンドラーを実行します。 マッピングとは、イーサリアムのイベントに対応して Graph Node が保存するデータエンティティを作成または更新する WASM モジュールのことです。 +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. Dapp は、ノードの [GraphQL エンドポイント](https://graphql.org/learn/) を使用して、ブロックチェーンからインデックス付けされたデータをグラフ ノードに照会します。グラフ ノードは、ストアのインデックス作成機能を利用して、このデータを取得するために、GraphQL クエリを基盤となるデータ ストアのクエリに変換します。 dapp は、このデータをエンドユーザー向けの豊富な UI に表示し、エンドユーザーはそれを使用して Ethereum で新しいトランザクションを発行します。サイクルが繰り返されます。 ## 次のステップ -The following sections provide a more in-depth look at subgraphs, their deployment and data querying. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Before you write your own subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed subgraphs. Each subgraph's page includes a GraphQL playground, allowing you to query its data. 
+Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. From 4f2f21afa66511163a188586d7725e99829c45e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:12 -0500 Subject: [PATCH 0016/1789] New translations about.mdx (Korean) --- website/src/pages/ko/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/ko/about.mdx b/website/src/pages/ko/about.mdx index 02b29895881f..833b097673d2 100644 --- a/website/src/pages/ko/about.mdx +++ b/website/src/pages/ko/about.mdx @@ -30,25 +30,25 @@ Blockchain properties, such as finality, chain reorganizations, and uncled block ## The Graph Provides a Solution -The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. Today, there is a decentralized protocol that is backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node) that enables this process. ### How The Graph Functions -Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Specifics -- The Graph uses subgraph descriptions, which are known as the subgraph manifest inside the subgraph. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- The subgraph description outlines the smart contracts of interest for a subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- When creating a subgraph, you need to write a subgraph manifest. +- When creating a Subgraph, you need to write a Subgraph manifest. -- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that subgraph. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -The diagram below provides more detailed information about the flow of data after a subgraph manifest has been deployed with Ethereum transactions. 
+The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. ![A graphic explaining how The Graph uses Graph Node to serve queries to data consumers](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ The flow follows these steps: 1. A dapp adds data to Ethereum through a transaction on a smart contract. 2. The smart contract emits one or more events while processing the transaction. -3. Graph Node continually scans Ethereum for new blocks and the data for your subgraph they may contain. -4. Graph Node finds Ethereum events for your subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. The dapp queries the Graph Node for data indexed from the blockchain, using the node's [GraphQL endpoint](https://graphql.org/learn/). The Graph Node in turn translates the GraphQL queries into queries for its underlying data store in order to fetch this data, making use of the store's indexing capabilities. The dapp displays this data in a rich UI for end-users, which they use to issue new transactions on Ethereum. The cycle repeats. ## Next Steps -The following sections provide a more in-depth look at subgraphs, their deployment and data querying. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Before you write your own subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed subgraphs. Each subgraph's page includes a GraphQL playground, allowing you to query its data. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. From 215d233efe6d0eddd29979939d696626c592914c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:13 -0500 Subject: [PATCH 0017/1789] New translations about.mdx (Dutch) --- website/src/pages/nl/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/nl/about.mdx b/website/src/pages/nl/about.mdx index ab5a9033cdac..7fde3b3d507d 100644 --- a/website/src/pages/nl/about.mdx +++ b/website/src/pages/nl/about.mdx @@ -30,25 +30,25 @@ Blockchain properties, such as finality, chain reorganizations, and uncled block ## The Graph Provides a Solution -The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. 
Today, there is a decentralized protocol that is backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node) that enables this process. ### How The Graph Functions -Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Specifics -- The Graph uses subgraph descriptions, which are known as the subgraph manifest inside the subgraph. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- The subgraph description outlines the smart contracts of interest for a subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- When creating a subgraph, you need to write a subgraph manifest. +- When creating a Subgraph, you need to write a Subgraph manifest. -- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that subgraph. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -The diagram below provides more detailed information about the flow of data after a subgraph manifest has been deployed with Ethereum transactions. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. ![A graphic explaining how The Graph uses Graph Node to serve queries to data consumers](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ The flow follows these steps: 1. A dapp adds data to Ethereum through a transaction on a smart contract. 2. The smart contract emits one or more events while processing the transaction. -3. Graph Node continually scans Ethereum for new blocks and the data for your subgraph they may contain. -4. Graph Node finds Ethereum events for your subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. The dapp queries the Graph Node for data indexed from the blockchain, using the node's [GraphQL endpoint](https://graphql.org/learn/). The Graph Node in turn translates the GraphQL queries into queries for its underlying data store in order to fetch this data, making use of the store's indexing capabilities. 
The dapp displays this data in a rich UI for end-users, which they use to issue new transactions on Ethereum. The cycle repeats. ## Next Steps -The following sections provide a more in-depth look at subgraphs, their deployment and data querying. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Before you write your own subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed subgraphs. Each subgraph's page includes a GraphQL playground, allowing you to query its data. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. From 53df6c5fd56f88aeab93005309f964d04ffd02bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:14 -0500 Subject: [PATCH 0018/1789] New translations about.mdx (Polish) --- website/src/pages/pl/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/pl/about.mdx b/website/src/pages/pl/about.mdx index 199bc6a77400..abfc28d9390b 100644 --- a/website/src/pages/pl/about.mdx +++ b/website/src/pages/pl/about.mdx @@ -30,25 +30,25 @@ Blockchain properties, such as finality, chain reorganizations, and uncled block ## The Graph Provides a Solution -The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. Today, there is a decentralized protocol that is backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node) that enables this process. ### How The Graph Functions -Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Specifics -- The Graph uses subgraph descriptions, which are known as the subgraph manifest inside the subgraph. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- The subgraph description outlines the smart contracts of interest for a subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- When creating a subgraph, you need to write a subgraph manifest. 
+- When creating a Subgraph, you need to write a Subgraph manifest. -- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that subgraph. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -The diagram below provides more detailed information about the flow of data after a subgraph manifest has been deployed with Ethereum transactions. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. ![Grafika wyjaśniająca sposób w jaki protokół The Graph wykorzystuje węzeł Graph Node by obsługiwać zapytania dla konsumentów danych](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ Proces ten przebiega według poniższych kroków: 1. Aplikacja dApp dodaje dane do sieci Ethereum za pomocą transakcji w smart kontrakcie. 2. Inteligentny kontrakt emituje jedno lub więcej zdarzeń podczas przetwarzania transakcji. -3. Graph Node nieprzerwanie skanuje sieć Ethereum w poszukiwaniu nowych bloków i danych dla Twojego subgraphu, które mogą one zawierać. -4. Graph Node znajduje zdarzenia Ethereum dla Twojego subgraphu w tych blokach i uruchamia dostarczone przez Ciebie procedury mapowania. Mapowanie to moduł WASM, który tworzy lub aktualizuje jednostki danych przechowywane przez węzeł Graph Node w odpowiedzi na zdarzenia Ethereum. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. Aplikacja dApp wysyła zapytanie do węzła Graph Node o dane zindeksowane na blockchainie, korzystając z [punktu końcowego GraphQL](https://graphql.org/learn/). Węzeł Graph Node przekształca zapytania GraphQL na zapytania do swojego podstawowego magazynu danych w celu pobrania tych danych, wykorzystując zdolności indeksowania magazynu. Aplikacja dApp wyświetla te dane w interfejsie użytkownika dla użytkowników końcowych, którzy używają go do tworzenia nowych transakcji w sieci Ethereum. Cykl się powtarza. ## Kolejne kroki -The following sections provide a more in-depth look at subgraphs, their deployment and data querying. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Before you write your own subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed subgraphs. Each subgraph's page includes a GraphQL playground, allowing you to query its data. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. 
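The about.mdx pages patched above all describe step 5 of the data flow, in which a dapp queries a Graph Node's [GraphQL endpoint](https://graphql.org/learn/) for indexed data. A minimal sketch of what such a query can look like from a dapp is shown below; the endpoint URL and the `transfers` entity with its fields are hypothetical placeholders, not anything defined in these files.

```typescript
// Minimal sketch of a dapp querying a Subgraph's GraphQL endpoint (step 5 of the flow).
// SUBGRAPH_URL and the `transfers` entity/fields are hypothetical placeholders.
const SUBGRAPH_URL = "https://api.studio.thegraph.com/query/<id>/<name>/<version>";

async function fetchRecentTransfers(): Promise<unknown[]> {
  const query = `{
    transfers(first: 5, orderBy: blockNumber, orderDirection: desc) {
      id
      from
      to
      value
    }
  }`;

  const response = await fetch(SUBGRAPH_URL, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query }),
  });

  // GraphQL responses carry either a `data` object or an `errors` array.
  const { data, errors } = await response.json();
  if (errors) {
    throw new Error(`Subgraph query failed: ${JSON.stringify(errors)}`);
  }
  return data.transfers;
}

fetchRecentTransfers().then(console.log).catch(console.error);
```

The same query body can also be pasted into the GraphQL playground that, as the pages above note, is available on each Subgraph's page in Graph Explorer.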
From 4083afe0e92ff631cad49a4f92a5092b3eebc86f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:15 -0500 Subject: [PATCH 0019/1789] New translations about.mdx (Portuguese) --- website/src/pages/pt/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/pt/about.mdx b/website/src/pages/pt/about.mdx index 6603713efd91..22d7582d014d 100644 --- a/website/src/pages/pt/about.mdx +++ b/website/src/pages/pt/about.mdx @@ -30,25 +30,25 @@ Propriedades de blockchain, como finalidade, reorganizações de chain, ou bloco ## The Graph Providencia uma Solução -O The Graph resolve este desafio com um protocolo descentralizado que indexa e permite queries eficientes e de alto desempenho de dados de blockchain. Estas APIs ("subgraphs" indexados) podem então ser consultados num query com uma API GraphQL padrão. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. Hoje, há um protocolo descentralizado apoiado pela implementação de código aberto do [Graph Node](https://github.com/graphprotocol/graph-node) que facilita este processo. ### Como o The Graph Funciona -Indexar dados em blockchain é um processo difícil, mas facilitado pelo The Graph. O The Graph aprende como indexar dados no Ethereum com o uso de subgraphs. Subgraphs são APIs personalizadas construídas com dados de blockchain, que extraem, processam e armazenam dados de uma blockchain para poderem ser consultadas suavemente via GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Especificações -- O The Graph usa descrições de subgraph, conhecidas como "manifests de subgraph" dentro do subgraph. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- A descrição do subgraph contorna os contratos inteligentes de interesse para o mesmo, os eventos dentro destes contratos para focar, e como mapear dados de evento para dados que o The Graph armazenará no seu banco de dados. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- Ao criar um subgraph, primeiro é necessário escrever um manifest de subgraph. +- When creating a Subgraph, you need to write a Subgraph manifest. -- Após escrever o `subgraph manifest`, é possível usar o Graph CLI para armazenar a definição no IPFS e instruir o Indexador para começar a indexar dados para o subgraph. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -O diagrama abaixo dá informações mais detalhadas sobre o fluxo de dados quando um manifest de subgraph for lançado com transações no Ethereum. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. 
![Um gráfico que explica como o The Graph utiliza Graph Nodes para servir queries para consumidores de dados](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ O fluxo segue estes passos: 1. Um dApp adiciona dados à Ethereum através de uma transação em contrato inteligente. 2. O contrato inteligente emite um ou mais eventos enquanto processa a transação. -3. O Graph Node escaneia continuamente a Ethereum por novos blocos e os dados que podem conter para o seu subgraph. -4. O Graph Node encontra eventos na Ethereum para o seu subgraph nestes blocos e executa os handlers de mapeamento que forneceu. O mapeamento é um módulo WASM que cria ou atualiza as entidades de dados que o Graph Node armazena em resposta a eventos na Ethereum. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. O dApp consulta o Graph Node para dados indexados da blockchain, através do [endpoint GraphQL](https://graphql.org/learn/) do node. O Graph Node, por sua vez, traduz os queries GraphQL em queries para o seu armazenamento subjacente de dados para poder retirar estes dados, com o uso das capacidades de indexação do armazenamento. O dApp exibe estes dados em uma interface rica para utilizadores finais, que eles usam para emitir novas transações na Ethereum. E o ciclo se repete. ## Próximos Passos -As seguintes secções providenciam um olhar mais íntimo nos subgraphs, na sua publicação e no query de dados. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Antes de escrever o seu próprio subgraph, é recomendado explorar o [Graph Explorer](https://thegraph.com/explorer) e revir alguns dos subgraphs já publicados. A página de todo subgraph inclui um ambiente de teste em GraphQL que lhe permite consultar os dados dele. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. From dfb552207d6f8ee891c98ac7b883bb8351da9adb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:16 -0500 Subject: [PATCH 0020/1789] New translations about.mdx (Russian) --- website/src/pages/ru/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/ru/about.mdx b/website/src/pages/ru/about.mdx index 35f9c6efd933..005e9926c3e8 100644 --- a/website/src/pages/ru/about.mdx +++ b/website/src/pages/ru/about.mdx @@ -30,25 +30,25 @@ Alternatively, you have the option to set up your own server, process the transa ## The Graph предлагает решение -The Graph решает эту проблему с помощью децентрализованного протокола, который индексирует и обеспечивает эффективный и высокопроизводительный запрос данных блокчейна. Эти API (индексированные «субграфы») затем могут быть запрошены с помощью стандартного API GraphQL. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. 
Сегодня существует децентрализованный протокол, поддерживаемый реализацией с открытым исходным кодом [Graph Node](https://github.com/graphprotocol/graph-node), который обеспечивает этот процесс. ### Как функционирует The Graph -Индексирование данных блокчейна очень сложный процесс, но The Graph упрощает его. The Graph учится индексировать данные Ethereum с помощью субграфов. Субграфы — это пользовательские API, построенные на данных блокчейна, которые извлекают данные из блокчейна, обрабатывают их и сохраняют так, чтобы их можно было легко запрашивать через GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Специфические особенности -- В The Graph используются описания субграфов, которые называются манифестами субграфов внутри субграфа. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- В описании субграфа описываются смарт-контракты, представляющие интерес для субграфа, события в этих контрактах, на которых следует сосредоточиться, а также способы сопоставления данных о событиях с данными, которые The Graph будет хранить в своей базе данных. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- При создании субграфа Вам необходимо написать манифест субграфа. +- When creating a Subgraph, you need to write a Subgraph manifest. -- После написания `манифеста субграфа` Вы можете использовать Graph CLI для сохранения определения в IPFS и дать команду индексатору начать индексирование данных для этого субграфа. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -На диаграмме ниже представлена ​​более подробная информация о потоке данных после развертывания манифеста субграфа с транзакциями Ethereum. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. ![График, объясняющий потребителям данных, как The Graph использует Graph Node для обслуживания запросов](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ The Graph решает эту проблему с помощью децентр 1. Dapp добавляет данные в Ethereum через транзакцию в смарт-контракте. 2. Смарт-контракт генерирует одно или несколько событий во время обработки транзакции. -3. Graph Node постоянно сканирует Ethereum на наличие новых блоков и данных для Вашего субграфа, которые они могут содержать. -4. The Graph нода затем разбирает события, относящиеся к Вашему субграфу, которые записаны в данном блоке и структурирует их согласно схеме данных описанной в subgraph используя модуль WASM. Затем данные сохраняются в таблицы базы данных Graph Node. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. 
Dapp запрашивает у Graph Node данные, проиндексированные с блокчейна, используя [конечную точку GraphQL](https://graphql.org/learn/) ноды. В свою очередь, Graph Node переводит запросы GraphQL в запросы к его базовому хранилищу данных, чтобы получить эти данные, используя возможности индексации этого хранилища. Dapp отображает эти данные в насыщенном пользовательском интерфейсе для конечных пользователей, который они используют для создания новых транзакций в Ethereum. Цикл повторяется. ## Что далее -В следующих разделах более подробно рассматриваются субграфы, их развертывание и запросы данных. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Прежде чем писать собственный субграф, рекомендуется ознакомиться с [Graph Explorer](https://thegraph.com/explorer) и изучить некоторые из уже развернутых субграфов. Страница каждого субграфа включает в себя тестовую площадку GraphQL, позволяющую запрашивать его данные. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. From 19a94f9589682ea3fb754c13b4d2b38c6d78955b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:17 -0500 Subject: [PATCH 0021/1789] New translations about.mdx (Swedish) --- website/src/pages/sv/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/sv/about.mdx b/website/src/pages/sv/about.mdx index 90c63c0f036d..8f3ae9f1a8e7 100644 --- a/website/src/pages/sv/about.mdx +++ b/website/src/pages/sv/about.mdx @@ -30,25 +30,25 @@ Blockchain properties, such as finality, chain reorganizations, and uncled block ## The Graph Provides a Solution -The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. Today, there is a decentralized protocol that is backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node) that enables this process. ### How The Graph Functions -Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Specifics -- The Graph uses subgraph descriptions, which are known as the subgraph manifest inside the subgraph. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. 
-- The subgraph description outlines the smart contracts of interest for a subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- When creating a subgraph, you need to write a subgraph manifest. +- When creating a Subgraph, you need to write a Subgraph manifest. -- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that subgraph. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -The diagram below provides more detailed information about the flow of data after a subgraph manifest has been deployed with Ethereum transactions. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. ![En grafik som förklarar hur The Graf använder Graf Node för att servera frågor till datakonsumenter](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ Följande steg följs: 1. En dapp lägger till data i Ethereum genom en transaktion på ett smart kontrakt. 2. Det smarta kontraktet sänder ut en eller flera händelser under bearbetningen av transaktionen. -3. Graf Node skannar kontinuerligt Ethereum efter nya block och den data för din subgraf de kan innehålla. -4. Graf Node hittar Ethereum-händelser för din subgraf i dessa block och kör de kartläggande hanterarna du tillhandahållit. Kartläggningen är en WASM-modul som skapar eller uppdaterar de dataenheter som Graph Node lagrar som svar på Ethereum-händelser. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. Dappen frågar Graph Node om data som indexerats från blockkedjan med hjälp av nodens [GraphQL-slutpunkt](https://graphql.org/learn/). Graph Node översätter i sin tur GraphQL-frågorna till frågor för sin underliggande datalagring för att hämta dessa data, och använder lagrets indexeringsegenskaper. Dappen visar dessa data i ett användarvänligt gränssnitt för slutanvändare, som de använder för att utfärda nya transaktioner på Ethereum. Cykeln upprepas. ## Nästa steg -The following sections provide a more in-depth look at subgraphs, their deployment and data querying. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Before you write your own subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed subgraphs. Each subgraph's page includes a GraphQL playground, allowing you to query its data. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. 
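Step 4 of the flow described in these pages says that Graph Node runs mapping handlers, compiled to WASM, which create or update entities in response to Ethereum events. Below is a sketch of such a handler, written in AssemblyScript (the strict TypeScript subset Graph mappings are typically written in). The `Transfer` event class, the `Account` entity, its `balance` field, and the `generated/` import paths are assumptions standing in for the output of `graph codegen` on a hypothetical ERC-20 Subgraph.

```typescript
// Sketch of a mapping handler (step 4). `Transfer` and `Account` are assumed to be
// generated by `graph codegen` from a hypothetical Subgraph's ABI and schema;
// the names are illustrative only.
import { Transfer } from "../generated/Token/Token";
import { Account } from "../generated/schema";

export function handleTransfer(event: Transfer): void {
  // Entities are keyed by an ID; here the recipient address is used.
  const id = event.params.to.toHexString();
  let account = Account.load(id);
  if (account == null) {
    account = new Account(id);
    account.balance = event.params.value;
  } else {
    account.balance = account.balance.plus(event.params.value);
  }
  // Graph Node persists the entity in its store in response to the event.
  account.save();
}
```

Which events reach a handler like this is determined by the Subgraph manifest that the surrounding pages describe.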
From b439d55a22697694e86943e30ef7a7771f4103be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:18 -0500 Subject: [PATCH 0022/1789] New translations about.mdx (Turkish) --- website/src/pages/tr/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/tr/about.mdx b/website/src/pages/tr/about.mdx index 775696c41265..3b1dce5a5617 100644 --- a/website/src/pages/tr/about.mdx +++ b/website/src/pages/tr/about.mdx @@ -30,25 +30,25 @@ Finalite, zincir yeniden organizasyonu ve "uncle" bloklar gibi blokzinciri özel ## The Graph'in Sağladığı Çözüm -The Graph, blokzinciri verilerini endeksleyip verimli, yüksek performanslı sorgulama imkanı sunan merkeziyetsiz bir protokol ile bu zorluğu çözer. Bu endekslenmiş API'lar ("subgraph'ler"), standart bir GraphQL API'ı ile sorgulanabilir. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. Artık, bu süreci mümkün kılan, [Graph Düğümü](https://github.com/graphprotocol/graph-node)'nün açık kaynaklı implementasyonuna dayanan merkeziyetsiz bir protokol mevcut. ### The Graph'in Çalışma Şekli -Blokzinciri verilerini endekslemek oldukça zordur, ancak The Graph bunu kolaylaştırır. The Graph, Ethereum verilerini nasıl endeksleyeceğini subgraph'ler kullanarak öğrenir. Subgraph'ler, blokzinciri verileri üzerine kurulu özel yapım API'lerdir; bu API'ler blokzincirinden veriyi çıkarır, işler ve sorguların GraphQL ile sorunsuz bir şekilde yapılabilmesi için depolar. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Ayrıntılar -- The Graph, subgraph tanımlarını kullanır; bu tanımlar subgraph içinde subgraph manifestosu olarak bilinir. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- Subgraph tanımı, bir subgraph için ilgili akıllı sözleşmeleri, bu sözleşmelerde odaklanılacak olayları ve bu olay verilerinin The Graph'in veritabanında depolayacağı verilere nasıl eşleneceğini açıklar. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- Subgraph oluştururken bir subgraph manifestosu yazmanız gerekir. +- When creating a Subgraph, you need to write a Subgraph manifest. -- `Subgraph manifestosunu` yazdıktan sonra, Graph CLI'yi kullanarak tanımı IPFS'e depolayabilir ve bir Indexer'a bu subgraph için veri endekslemeye başlaması talimatını verebilirsiniz. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -Aşağıdaki diyagramda, subgraph manifestosunun Ethereum blokzinciri üzerinde yapılan işlemler aracılığıyla yayına alınmasından sonra veri akışının nasıl ilerlediğine dair daha detaylı bilgi bulabilirsiniz. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. 
![The Graph'in, Graph Düğümü'nü kullanarak veri tüketicilerine sorgu sunma sürecini açıklayan bir grafik](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ Veri akışı şu şekildedir: 1. Bir dapp, bir akıllı sözleşme üzerinde işlem yaparak Ethereum'a veri ekler. 2. Akıllı sözleşme, işlemi işlerken bir veya daha fazla olay yayımlar. -3. Graph Düğümü, Ethereum blokzincirini yeni blokları sürekli olarak tarar ve blokların subgraph'iniz için endekslenmesi gereken verileri içerip içermediğini kontrol eder. -4. Graph Düğümü, bu bloklarda subgraph'iniz için Ethereum olaylarını bulur ve sağladığınız eşleme işleyicilerini (mapping handler) çalıştırır. Eşleme (mapping), Ethereum olaylarına karşılık olarak Graph Düğümünün depoladığı veri varlıklarını oluşturan veya güncelleyen bir WASM modülüdür. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. Dapp, blokzincirinden endekslenen veriler için Graph Düğümüne, düğümün [GraphQL uç noktası](https://graphql.org/learn/) üzerinden sorgu gönderir. Graph Düğümü ise veriyi getirmek için bu sorguları kendi veri deposuna yönelik sorgulara çevirir ve depolama sisteminin endeksleme kabiliyetlerini kullanarak bu verileri alır. Dapp, bu verileri son kullanıcılar için zengin bir arayüzde gösterir ve kullanıcılar bu arayüzü kullanarak Ethereum'da yeni işlemler gerçekleştirir. Bu döngü tekrarlanır. ## Sonraki Adımlar -Sonraki bölümler, subgraph'lere, yayına alınmalarına ve veri sorgulama sürecine daha derin bir bakış sunmaktadır. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Kendi subgraph'inizi yazmadan önce, [Graph Explorer](https://thegraph.com/explorer)'ı keşfetmeniz ve halihazırda yayına alınmış bazı subgraph'leri incelemeniz önerilir. Her subgraph'in sayfasında bir GraphQL playground bulunur. Bu aracı kullanarak subgraph'in verilerine erişebilir ve sorgulamalar yapabilirsiniz. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. From c2d4eb344b11ea6e228fc8e565e8576ae714e6d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:19 -0500 Subject: [PATCH 0023/1789] New translations about.mdx (Ukrainian) --- website/src/pages/uk/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/uk/about.mdx b/website/src/pages/uk/about.mdx index 7d346fa59854..55eb5593f48b 100644 --- a/website/src/pages/uk/about.mdx +++ b/website/src/pages/uk/about.mdx @@ -30,25 +30,25 @@ Blockchain properties, such as finality, chain reorganizations, and uncled block ## The Graph Provides a Solution -The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. 
Today, there is a decentralized protocol that is backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node) that enables this process. ### How The Graph Functions -Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Specifics -- The Graph uses subgraph descriptions, which are known as the subgraph manifest inside the subgraph. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- The subgraph description outlines the smart contracts of interest for a subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- When creating a subgraph, you need to write a subgraph manifest. +- When creating a Subgraph, you need to write a Subgraph manifest. -- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that subgraph. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -The diagram below provides more detailed information about the flow of data after a subgraph manifest has been deployed with Ethereum transactions. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. ![Малюнок, що пояснює, як The Graph використовує Graph Node для обслуговування запитів до споживачів даних](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ The diagram below provides more detailed information about the flow of data afte 1. Додаток відправляє дані в мережу Ethereum через транзакцію в смартконтракті. 2. Під час обробки транзакції смартконтракт видає одну або декілька різних подій. -3. Graph Node постійно сканує Ethereum на наявність нових блоків і даних для вашого підграфа, які вони можуть містити. -4. Graph Node знаходить події на Ethereum для вашого підграфа в цих блоках і запускає надані вами mapping handlers. Mapping - це модуль WASM, який створює або оновлює структуру даних, що зберігаються у Graph Node у відповідь на події на Ethereum. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. Додаток запитує Graph Node про дані, проіндексовані в блокчейні, використовуючи [кінцеву точку GraphQL](https://graphql.org/learn/). 
The Graph Node, і собі, переводить запити GraphQL в запити до свого базового сховища даних, щоб отримати ці дані, використовуючи можливості індексації сховища. Dapp відображає ці дані в величезному інтерфейсі для кінцевих користувачів, який вони використовують для створення нових транзакцій на Ethereum. Цикл повторюється. ## Наступні кроки -The following sections provide a more in-depth look at subgraphs, their deployment and data querying. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Before you write your own subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed subgraphs. Each subgraph's page includes a GraphQL playground, allowing you to query its data. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. From 03651b97825a14d33c09f339d4ea8759c7c4f5ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:20 -0500 Subject: [PATCH 0024/1789] New translations about.mdx (Chinese Simplified) --- website/src/pages/zh/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/zh/about.mdx b/website/src/pages/zh/about.mdx index 81c40b3d9f61..b12ff59041cf 100644 --- a/website/src/pages/zh/about.mdx +++ b/website/src/pages/zh/about.mdx @@ -30,25 +30,25 @@ Blockchain properties, such as finality, chain reorganizations, and uncled block ## The Graph Provides a Solution -The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. Today, there is a decentralized protocol that is backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node) that enables this process. ### How The Graph Functions -Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Specifics -- The Graph uses subgraph descriptions, which are known as the subgraph manifest inside the subgraph. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- The subgraph description outlines the smart contracts of interest for a subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. 
+- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- When creating a subgraph, you need to write a subgraph manifest. +- When creating a Subgraph, you need to write a Subgraph manifest. -- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that subgraph. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -The diagram below provides more detailed information about the flow of data after a subgraph manifest has been deployed with Ethereum transactions. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. ![一图解释Graph如何使用Graph节点向数据消费者提供查询的图形](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ The diagram below provides more detailed information about the flow of data afte 1. 一个去中心化的应用程序通过智能合约上的交易向以太坊添加数据。 2. 智能合约在处理交易时,会发出一个或多个事件。 -3. Graph 节点不断扫描以太坊的新区块和它们可能包含的子图的数据。 -4. Graph 节点在这些区块中为你的子图找到以太坊事件并运行你提供的映射处理程序。 映射是一个 WASM 模块,它创建或更新 Graph 节点存储的数据实体,以响应以太坊事件。 +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. 去中心化的应用程序使用Graph节点的[GraphQL 端点](https://graphql.org/learn/),从区块链的索引中查询 Graph 节点的数据。 Graph 节点反过来将 GraphQL 查询转化为对其底层数据存储的查询,以便利用存储的索引功能来获取这些数据。 去中心化的应用程序在一个丰富的用户界面中为终端用户显示这些数据,他们用这些数据在以太坊上发行新的交易。 就这样周而复始。 ## 下一步 -The following sections provide a more in-depth look at subgraphs, their deployment and data querying. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Before you write your own subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed subgraphs. Each subgraph's page includes a GraphQL playground, allowing you to query its data. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. From 6c186522cf460fbb1e0241e39f4762ea6e45c7c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:21 -0500 Subject: [PATCH 0025/1789] New translations about.mdx (Urdu (Pakistan)) --- website/src/pages/ur/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/ur/about.mdx b/website/src/pages/ur/about.mdx index 75dd5e34c6e0..d737a0994ad9 100644 --- a/website/src/pages/ur/about.mdx +++ b/website/src/pages/ur/about.mdx @@ -30,25 +30,25 @@ Blockchain properties, such as finality, chain reorganizations, and uncled block ## The Graph Provides a Solution -The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. 
+The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. Today, there is a decentralized protocol that is backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node) that enables this process. ### How The Graph Functions -Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Specifics -- The Graph uses subgraph descriptions, which are known as the subgraph manifest inside the subgraph. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- The subgraph description outlines the smart contracts of interest for a subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- When creating a subgraph, you need to write a subgraph manifest. +- When creating a Subgraph, you need to write a Subgraph manifest. -- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that subgraph. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -The diagram below provides more detailed information about the flow of data after a subgraph manifest has been deployed with Ethereum transactions. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. ![ایک گرافک یہ بتاتا ہے کہ گراف کس طرح ڈیٹا صارفین کو کیوریز پیش کرنے کے لیے گراف نوڈ کا استعمال کرتا ہے](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ The diagram below provides more detailed information about the flow of data afte 1. ایک ڈیپ سمارٹ کنٹریکٹ پر ٹرانزیکشن کے ذریعے سے ایتھیریم میں ڈیٹا کا اضافہ کرتی ہے. 2. سمارٹ کنٹریکٹ ٹرانزیکشن پر کارروائی کے دوران ایک یا ایک سے زیادہ واقعات کا اخراج کرتا ہے. -3. گراف نوڈ ایتھیریم کو نئے بلاکس اور آپ کے سب گراف کے ڈیٹا کے لیے مسلسل سکین کرتا ہے. -4. گراف نوڈ ان بلاکس میں آپ کے سب گراف کے لیے ایتھریم ایونٹس تلاش کرتا ہے اور آپ کے فراہم کردہ میپنگ ہینڈلرز کو چلاتا ہے. میپنگ ایک WASM ماڈیول ہے جو ڈیٹا ہستیوں کو تخلیق یا اپ ڈیٹ کرتا ہے جو ایتھیریم ایونٹس کے جواب میں گراف نوڈ ذخیرہ کرتا ہے. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. 
ڈیپ بلاکچین سے انڈیکس کردہ ڈیٹا کے لیے گراف نوڈ کو کیوری کرتی ہے, نوڈ کے [GraphQL اینڈ پوائنٹ](https://graphql.org/learn/) کا استعمال کرتے ہوئے. گراف نوڈ بدلے میں اس ڈیٹا کو حاصل کرنے کے لیے GraphQL کی کیوریز کو اپنے بنیادی ڈیٹا اسٹور کی کیوریز میں تبدیل کرتا ہے, سٹور کی انڈیکسنگ کی صلاحیتوں کا استعمال کرتے ہوئے. ڈیسینٹرلائزڈ ایپلیکیشن اس ڈیٹا کو صارفین کے لیے ایک بھرپور UI میں دکھاتی ہے, جسے وہ ایتھیریم پر نئی ٹرانزیکشنز جاری کرنے کے لیے استعمال کرتے ہیں. یہ سلسلہ دہرایا جاتا ہے. ## اگلے مراحل -The following sections provide a more in-depth look at subgraphs, their deployment and data querying. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Before you write your own subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed subgraphs. Each subgraph's page includes a GraphQL playground, allowing you to query its data. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. From 3d016bb3b04516b8904eb0bd62480e81fdb65ce4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:22 -0500 Subject: [PATCH 0026/1789] New translations about.mdx (Vietnamese) --- website/src/pages/vi/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/vi/about.mdx b/website/src/pages/vi/about.mdx index 917e7817b3a7..dbcf77b348c9 100644 --- a/website/src/pages/vi/about.mdx +++ b/website/src/pages/vi/about.mdx @@ -30,25 +30,25 @@ Blockchain properties, such as finality, chain reorganizations, and uncled block ## The Graph Provides a Solution -The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. Today, there is a decentralized protocol that is backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node) that enables this process. ### How The Graph Functions -Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Specifics -- The Graph uses subgraph descriptions, which are known as the subgraph manifest inside the subgraph. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. 
-- The subgraph description outlines the smart contracts of interest for a subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- When creating a subgraph, you need to write a subgraph manifest. +- When creating a Subgraph, you need to write a Subgraph manifest. -- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that subgraph. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -The diagram below provides more detailed information about the flow of data after a subgraph manifest has been deployed with Ethereum transactions. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. ![A graphic explaining how The Graph uses Graph Node to serve queries to data consumers](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ Quy trình thực hiện theo các bước sau: 1. A dapp adds data to Ethereum through a transaction on a smart contract. 2. Hợp đồng thông minh phát ra một hoặc nhiều sự kiện trong khi xử lý giao dịch. -3. Graph Node liên tục quét Ethereum để tìm các khối mới và dữ liệu cho subgraph của bạn mà chúng có thể chứa. -4. Graph Node tìm các sự kiện Ethereum cho subgraph của bạn trong các khối này và chạy các trình xử lý ánh xạ mà bạn đã cung cấp. Ánh xạ là một mô-đun WASM tạo hoặc cập nhật các thực thể dữ liệu mà Graph Node lưu trữ để đáp ứng với các sự kiện Ethereum. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. The dapp queries the Graph Node for data indexed from the blockchain, using the node's [GraphQL endpoint](https://graphql.org/learn/). The Graph Node in turn translates the GraphQL queries into queries for its underlying data store in order to fetch this data, making use of the store's indexing capabilities. The dapp displays this data in a rich UI for end-users, which they use to issue new transactions on Ethereum. The cycle repeats. ## Bước tiếp theo -The following sections provide a more in-depth look at subgraphs, their deployment and data querying. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Before you write your own subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed subgraphs. Each subgraph's page includes a GraphQL playground, allowing you to query its data. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. 
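The `about.mdx` content in the patches above and below explains that a dapp queries indexed data through a Graph Node's [GraphQL endpoint](https://graphql.org/learn/), and that each Subgraph page in Graph Explorer exposes a GraphQL playground for the same purpose. The following is only a minimal sketch of what such a query might look like from a dapp, assuming a hypothetical Subgraph query URL and a hypothetical `tokens` entity; neither appears in these patches, so substitute the endpoint and schema of whichever Subgraph you actually explore.

```typescript
// Sketch only: the endpoint and the `tokens` entity are placeholders,
// not values taken from the patches in this series.
const SUBGRAPH_URL =
  "https://api.thegraph.com/subgraphs/name/example/example-subgraph";

// A GraphQL query against entities the (hypothetical) Subgraph schema defines.
const query = `
  {
    tokens(first: 5, orderBy: id) {
      id
      owner
    }
  }
`;

async function querySubgraph(): Promise<void> {
  // Graph Node accepts GraphQL queries as a JSON POST body.
  const response = await fetch(SUBGRAPH_URL, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query }),
  });
  const { data } = await response.json();
  console.log(data);
}

querySubgraph().catch(console.error);
```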
From 15902ea725f5e688fa5cb71b56e3baf8b51da85e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:25 -0500 Subject: [PATCH 0027/1789] New translations about.mdx (Marathi) --- website/src/pages/mr/about.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/mr/about.mdx b/website/src/pages/mr/about.mdx index 6ec630cd8e4e..9597ecb03bb2 100644 --- a/website/src/pages/mr/about.mdx +++ b/website/src/pages/mr/about.mdx @@ -30,25 +30,25 @@ Blockchain properties, such as finality, chain reorganizations, and uncled block ## The Graph Provides a Solution -The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. Today, there is a decentralized protocol that is backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node) that enables this process. ### How The Graph Functions -Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### Specifics -- The Graph uses subgraph descriptions, which are known as the subgraph manifest inside the subgraph. +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- The subgraph description outlines the smart contracts of interest for a subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- When creating a subgraph, you need to write a subgraph manifest. +- When creating a Subgraph, you need to write a Subgraph manifest. -- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that subgraph. +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -The diagram below provides more detailed information about the flow of data after a subgraph manifest has been deployed with Ethereum transactions. +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. 
![ग्राफिक डेटा ग्राहकांना प्रश्न देण्यासाठी ग्राफ नोड कसा वापरतो हे स्पष्ट करणारे ग्राफिक](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ The diagram below provides more detailed information about the flow of data afte 1. A dapp स्मार्ट करारावरील व्यवहाराद्वारे इथरियममध्ये डेटा जोडते. 2. व्यवहारावर प्रक्रिया करताना स्मार्ट करार एक किंवा अधिक इव्हेंट सोडतो. -3. ग्राफ नोड सतत नवीन ब्लॉक्ससाठी इथरियम स्कॅन करतो आणि तुमच्या सबग्राफचा डेटा त्यात असू शकतो. -4. ग्राफ नोड या ब्लॉक्समध्ये तुमच्या सबग्राफसाठी इथरियम इव्हेंट शोधतो आणि तुम्ही प्रदान केलेले मॅपिंग हँडलर चालवतो. मॅपिंग हे WASM मॉड्यूल आहे जे इथरियम इव्हेंट्सच्या प्रतिसादात ग्राफ नोड संचयित केलेल्या डेटा घटक तयार करते किंवा अद्यतनित करते. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. नोडचा [GraphQL एंडपॉइंट](https://graphql.org/learn/) वापरून ब्लॉकचेन वरून अनुक्रमित केलेल्या डेटासाठी dapp ग्राफ नोडची क्वेरी करते. ग्राफ नोड यामधून, स्टोअरच्या इंडेक्सिंग क्षमतांचा वापर करून, हा डेटा मिळविण्यासाठी त्याच्या अंतर्निहित डेटा स्टोअरच्या क्वेरींमध्ये GraphQL क्वेरीचे भाषांतर करतो. dapp हा डेटा अंतिम वापरकर्त्यांसाठी समृद्ध UI मध्ये प्रदर्शित करते, जो ते Ethereum वर नवीन व्यवहार जारी करण्यासाठी वापरतात. चक्राची पुनरावृत्ती होते. ## पुढील पायऱ्या -The following sections provide a more in-depth look at subgraphs, their deployment and data querying. +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -Before you write your own subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed subgraphs. Each subgraph's page includes a GraphQL playground, allowing you to query its data. +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. From 308cd4a5584c011fcb953bc56910338d094825e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:27 -0500 Subject: [PATCH 0028/1789] New translations about.mdx (Hindi) --- website/src/pages/hi/about.mdx | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/hi/about.mdx b/website/src/pages/hi/about.mdx index 7f9feff0a53e..53b13b3188a9 100644 --- a/website/src/pages/hi/about.mdx +++ b/website/src/pages/hi/about.mdx @@ -28,27 +28,27 @@ Alternatively, you have the option to set up your own server, process the transa ब्लॉकचेन की विशेषताएँ, जैसे अंतिमता, चेन पुनर्गठन, और अंकल ब्लॉक्स, प्रक्रिया में जटिलता जोड़ती हैं, जिससे ब्लॉकचेन डेटा से सटीक क्वेरी परिणाम प्राप्त करना समय लेने वाला और अवधारणात्मक रूप से चुनौतीपूर्ण हो जाता है। -## The Graph एक समाधान प्रदान करता है +## The Graph एक समाधान प्रदान करता है -The Graph इस चुनौती को एक विकेन्द्रीकृत प्रोटोकॉल के माध्यम से हल करता है जो ब्लॉकचेन डेटा को इंडेक्स करता है और उसकी कुशल और उच्च-प्रदर्शन वाली क्वेरी करने की सुविधा प्रदान करता है। ये एपीआई (इंडेक्स किए गए "सबग्राफ") फिर एक मानक GraphQL एपीआई के साथ क्वेरी की जा सकती हैं। +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. 
These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. आज एक विकेंद्रीकृत प्रोटोकॉल है, जो [Graph Node](https://github.com/graphprotocol/graph-node) के ओपन सोर्स इम्प्लीमेंटेशन द्वारा समर्थित है, जो इस प्रक्रिया को सक्षम बनाता है। ### The Graph कैसे काम करता है -ब्लॉकचेन डेटा को इंडेक्स करना बहुत मुश्किल होता है, लेकिन The Graph इसे आसान बना देता है। The Graph सबग्राफ्स का उपयोग करके एथेरियम डेटा को इंडेक्स करना सीखता है। सबग्राफ्स ब्लॉकचेन डेटा पर बनाए गए कस्टम एपीआई होते हैं, जो ब्लॉकचेन से डेटा निकालते हैं, उसे प्रोसेस करते हैं, और उसे इस तरह स्टोर करते हैं ताकि उसे GraphQL के माध्यम से आसानी से क्वेरी किया जा सके। +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. #### विशिष्टताएँ -- The Graph का उपयोग subgraph विवरणों के लिए करता है, जिन्हें subgraph के अंदर subgraph manifest के रूप में जाना जाता है। +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. -- सबग्राफ विवरण उन स्मार्ट कॉन्ट्रैक्ट्स की रूपरेखा प्रदान करता है जो एक सबग्राफ के लिए महत्वपूर्ण हैं, उन कॉन्ट्रैक्ट्स के भीतर कौन-कौन सी घटनाओं पर ध्यान केंद्रित करना है, और घटना डेटा को उस डेटा से कैसे मैप करना है जिसे The Graph अपने डेटाबेस में संग्रहीत करेगा। +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. -- जब आप एक subgraph बना रहे होते हैं, तो आपको एक subgraph मैनिफेस्ट लिखने की आवश्यकता होती है। +- When creating a Subgraph, you need to write a Subgraph manifest. -- `Subgraph manifest` लिखने के बाद, आप Graph CLI का उपयोग करके परिभाषा को IPFS में संग्रहीत कर सकते हैं और एक Indexer को उस subgraph के लिए डेटा को इंडेक्स करने का निर्देश दे सकते हैं। +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. -नीचे दिया गया आरेख Ethereum लेनदेन के साथ subgraph मैनिफेस्ट को डिप्लॉय करने के बाद डेटा के प्रवाह के बारे में अधिक विस्तृत जानकारी प्रदान करता है। +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. ![एक ग्राफ़िक समझाता है कि कैसे ग्राफ़ डेटा उपभोक्ताओं को क्वेरीज़ प्रदान करने के लिए ग्राफ़ नोड का उपयोग करता है](/img/graph-dataflow.png) @@ -56,12 +56,12 @@ The Graph इस चुनौती को एक विकेन्द्री 1. एक विकेंद्रीकृत एप्लिकेशन स्मार्ट अनुबंध पर लेनदेन के माध्यम से एथेरियम में डेटा जोड़ता है। 2. लेन-देन संसाधित करते समय स्मार्ट अनुबंध एक या अधिक घटनाओं का उत्सर्जन करता है। -3. ग्राफ़ नोड लगातार नए ब्लॉकों के लिए एथेरियम को स्कैन करता है और आपके सबग्राफ के डेटा में शामिल हो सकता है। -4. ग्राफ नोड इन ब्लॉकों में आपके सबग्राफ के लिए एथेरियम ईवेंट ढूंढता है और आपके द्वारा प्रदान किए गए मैपिंग हैंडलर को चलाता है। मैपिंग एक WASM मॉड्यूल है जो एथेरियम घटनाओं के जवाब में ग्राफ़ नोड द्वारा संग्रहीत डेटा संस्थाओं को बनाता या अपडेट करता है। +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. 
The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. 5. नोड के [GraphQL समापन बिंदु](https://graphql.org/learn/) का उपयोग करते हुए, विकेन्द्रीकृत एप्लिकेशन ब्लॉकचैन से अनुक्रमित डेटा के लिए ग्राफ़ नोड से पूछताछ करता है। ग्राफ़ नोड बदले में इस डेटा को प्राप्त करने के लिए, स्टोर की इंडेक्सिंग क्षमताओं का उपयोग करते हुए, अपने अंतर्निहित डेटा स्टोर के लिए ग्राफ़कॉल प्रश्नों का अनुवाद करता है। विकेंद्रीकृत एप्लिकेशन इस डेटा को एंड-यूजर्स के लिए एक समृद्ध यूआई में प्रदर्शित करता है, जिसका उपयोग वे एथेरियम पर नए लेनदेन जारी करने के लिए करते हैं। चक्र दोहराता है। ## अगले कदम -निम्नलिखित अनुभागों में subgraphs, उनके डिप्लॉयमेंट और डेटा क्वेरी करने के तरीके पर अधिक गहराई से जानकारी दी गई है। +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. -अपना खुद का subgraph लिखने से पहले, यह अनुशंसा की जाती है कि आप [Graph Explorer](https://thegraph.com/explorer) को एक्सप्लोर करें और पहले से डिप्लॉय किए गए कुछ subgraphs की समीक्षा करें। प्रत्येक subgraph के पेज में एक GraphQL प्लेग्राउंड शामिल होता है, जिससे आप उसके डेटा को क्वेरी कर सकते हैं। +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. From 3148225ff4a9343f2f35e2950d41cf5a41743e88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:28 -0500 Subject: [PATCH 0029/1789] New translations about.mdx (Swahili) --- website/src/pages/sw/about.mdx | 67 ++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 website/src/pages/sw/about.mdx diff --git a/website/src/pages/sw/about.mdx b/website/src/pages/sw/about.mdx new file mode 100644 index 000000000000..833b097673d2 --- /dev/null +++ b/website/src/pages/sw/about.mdx @@ -0,0 +1,67 @@ +--- +title: About The Graph +--- + +## What is The Graph? + +The Graph is a powerful decentralized protocol that enables seamless querying and indexing of blockchain data. It simplifies the complex process of querying blockchain data, making dapp development faster and easier. + +## Understanding the Basics + +Projects with complex smart contracts such as [Uniswap](https://uniswap.org/) and NFTs initiatives like [Bored Ape Yacht Club](https://boredapeyachtclub.com/) store data on the Ethereum blockchain, making it very difficult to read anything other than basic data directly from the blockchain. + +### Challenges Without The Graph + +In the case of the example listed above, Bored Ape Yacht Club, you can perform basic read operations on [the contract](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code). You can read the owner of a certain Ape, read the content URI of an Ape based on their ID, or read the total supply. + +- This can be done because these read operations are programmed directly into the smart contract itself. However, more advanced, specific, and real-world queries and operations like aggregation, search, relationships, and non-trivial filtering, **are not possible**. + +- For instance, if you want to inquire about Apes owned by a specific address and refine your search based on a particular characteristic, you would not be able to obtain that information by directly interacting with the contract itself. 
+ +- To get more data, you would have to process every single [`transfer`](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code#L1746) event ever emitted, read the metadata from IPFS using the Token ID and IPFS hash, and then aggregate it. + +### Why is this a problem? + +It would take **hours or even days** for a decentralized application (dapp) running in a browser to get an answer to these simple questions. + +Alternatively, you have the option to set up your own server, process the transactions, store them in a database, and create an API endpoint to query the data. However, this option is [resource intensive](/resources/benefits/), needs maintenance, presents a single point of failure, and breaks important security properties required for decentralization. + +Blockchain properties, such as finality, chain reorganizations, and uncled blocks, add complexity to the process, making it time-consuming and conceptually challenging to retrieve accurate query results from blockchain data. + +## The Graph Provides a Solution + +The Graph solves this challenge with a decentralized protocol that indexes and enables the efficient and high-performance querying of blockchain data. These APIs (indexed "Subgraphs") can then be queried with a standard GraphQL API. + +Today, there is a decentralized protocol that is backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node) that enables this process. + +### How The Graph Functions + +Indexing blockchain data is very difficult, but The Graph makes it easy. The Graph learns how to index Ethereum data by using Subgraphs. Subgraphs are custom APIs built on blockchain data that extract data from a blockchain, processes it, and stores it so that it can be seamlessly queried via GraphQL. + +#### Specifics + +- The Graph uses Subgraph descriptions, which are known as the Subgraph manifest inside the Subgraph. + +- The Subgraph description outlines the smart contracts of interest for a Subgraph, the events within those contracts to focus on, and how to map event data to the data that The Graph will store in its database. + +- When creating a Subgraph, you need to write a Subgraph manifest. + +- After writing the `subgraph manifest`, you can use the Graph CLI to store the definition in IPFS and instruct an Indexer to start indexing data for that Subgraph. + +The diagram below provides more detailed information about the flow of data after a Subgraph manifest has been deployed with Ethereum transactions. + +![A graphic explaining how The Graph uses Graph Node to serve queries to data consumers](/img/graph-dataflow.png) + +The flow follows these steps: + +1. A dapp adds data to Ethereum through a transaction on a smart contract. +2. The smart contract emits one or more events while processing the transaction. +3. Graph Node continually scans Ethereum for new blocks and the data for your Subgraph they may contain. +4. Graph Node finds Ethereum events for your Subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. +5. The dapp queries the Graph Node for data indexed from the blockchain, using the node's [GraphQL endpoint](https://graphql.org/learn/). The Graph Node in turn translates the GraphQL queries into queries for its underlying data store in order to fetch this data, making use of the store's indexing capabilities. 
The dapp displays this data in a rich UI for end-users, which they use to issue new transactions on Ethereum. The cycle repeats. + +## Next Steps + +The following sections provide a more in-depth look at Subgraphs, their deployment and data querying. + +Before you write your own Subgraph, it's recommended to explore [Graph Explorer](https://thegraph.com/explorer) and review some of the already deployed Subgraphs. Each Subgraph's page includes a GraphQL playground, allowing you to query its data. From 2dabb88521229b4e5719643e96e4470600d58f9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:29 -0500 Subject: [PATCH 0030/1789] New translations starting-your-subgraph.mdx (Romanian) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/ro/subgraphs/developing/creating/starting-your-subgraph.mdx index 4823231d9a40..4931e6b1fd34 100644 --- a/website/src/pages/ro/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/ro/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## Overview -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). 
+ +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 931bc34304d857faebfd95dfc23dc115ede71d4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:30 -0500 Subject: [PATCH 0031/1789] New translations starting-your-subgraph.mdx (French) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/fr/subgraphs/developing/creating/starting-your-subgraph.mdx index 4030093310a4..f7b4c6dfd5de 100644 --- a/website/src/pages/fr/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/fr/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Démarrer votre subgraph ## Aperçu -The Graph contient des milliers de subgraphs déjà disponibles pour des requêtes. Consultez [The Graph Explorer](https://thegraph.com/explorer) et trouvez-en un qui correspond déjà à vos besoins. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -Lorsque vous créez un [subgraph](/subgraphs/developing/subgraphs/), vous créez une API ouverte personnalisée qui extrait des données d'une blockchain, les traite, les stocke et les rend faciles à interroger via GraphQL. +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Le développement de subgraphs peut aller de simples modèles « scaffold » à des subgraphs avancés, spécialement adaptés à vos besoins. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Commencez à développer -Lancez le processus et construisez un subgraph qui correspond à vos besoins : +Start the process and build a Subgraph that matches your needs: 1. [Installer la CLI](/subgraphs/developing/creating/install-the-cli/) - Configurez votre infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Comprenez le composant clé d'un subgraph +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. 
[Le schéma GraphQL](/subgraphs/developing/creating/ql-schema/) - Écrivez votre schéma 4. [Écrire les mappings AssemblyScript](/subgraphs/developing/creating/assemblyscript-mappings/) - Rédigez vos mappings -5. [Fonctionnalités avancées](/subgraphs/developing/creating/advanced/) - Personnalisez votre subgraphs avec des fonctionnalités avancées +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explorez d'autres [ressources pour les API](/subgraphs/developing/creating/graph-ts/README/) et effectuez des tests en local avec [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| Version | Notes de version | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 9cf590ec1b590906a0d84468d1a6c0c9cadec63f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:31 -0500 Subject: [PATCH 0032/1789] New translations starting-your-subgraph.mdx (Spanish) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/es/subgraphs/developing/creating/starting-your-subgraph.mdx index 76ff7db16bba..669a29583ee8 100644 --- a/website/src/pages/es/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/es/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## Descripción -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. 
+Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| Version | Notas del lanzamiento | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From f2f9eab63fdb1e5caebc889288303ffee3a23453 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:32 -0500 Subject: [PATCH 0033/1789] New translations starting-your-subgraph.mdx (Arabic) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/ar/subgraphs/developing/creating/starting-your-subgraph.mdx index 8f2e787688c2..b7d5f7168427 100644 --- a/website/src/pages/ar/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/ar/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## نظره عامة -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. 
-When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| الاصدار | ملاحظات الإصدار | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From b128dc90b811515bb449412f8652e7cd5a989978 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:33 -0500 Subject: [PATCH 0034/1789] New translations starting-your-subgraph.mdx (Czech) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/cs/subgraphs/developing/creating/starting-your-subgraph.mdx index 436b407a19ba..04f1eee28246 100644 --- a/website/src/pages/cs/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/cs/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## Přehled -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| Verze | Poznámky vydání | +| :---: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From aa88e317e7af255dffb6c4c8c83c575d404cb501 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:34 -0500 Subject: [PATCH 0035/1789] New translations starting-your-subgraph.mdx (German) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/de/subgraphs/developing/creating/starting-your-subgraph.mdx index dbffb92cfc5e..2a4fa8433e59 100644 --- a/website/src/pages/de/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starten Ihres Subgraphen ## Überblick -The Graph beherbergt Tausende von Subgraphen, die bereits für Abfragen zur Verfügung stehen. Schauen Sie also in [The Graph Explorer] (https://thegraph.com/explorer) nach und finden Sie einen, der Ihren Anforderungen entspricht. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -Wenn Sie einen [Subgraphen](/subgraphs/developing/subgraphs/) erstellen, erstellen Sie eine benutzerdefinierte offene API, die Daten aus einer Blockchain extrahiert, verarbeitet, speichert und über GraphQL einfach abfragen lässt. +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Die Entwicklung von Subgraphen reicht von einfachen Gerüst-Subgraphen bis hin zu fortgeschrittenen, speziell zugeschnittenen Subgraphen. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start des Erstellens -Starten Sie den Prozess und erstellen Sie einen Subgraphen, der Ihren Anforderungen entspricht: +Start the process and build a Subgraph that matches your needs: 1. [Installieren der CLI](/subgraphs/developing/creating/install-the-cli/) - Richten Sie Ihre Infrastruktur ein -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Verstehen der wichtigsten Komponenten eines Subgraphen +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [Das GraphQL-Schema](/subgraphs/developing/creating/ql-schema/) - Schreiben Sie Ihr Schema 4. [Schreiben von AssemblyScript-Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Schreiben Sie Ihre Mappings -5. [Erweiterte Funktionen](/subgraphs/developing/creating/advanced/) - Passen Sie Ihren Subgraph mit erweiterten Funktionen an +5. 
[Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Erkunden Sie zusätzliche [Ressourcen für APIs](/subgraphs/developing/creating/graph-ts/README/) und führen Sie lokale Tests mit [Matchstick](/subgraphs/developing/creating/unit-testing-framework/) durch. + +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 0665d5d922b45d5b6611cd2a61cc74543c7efb7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:35 -0500 Subject: [PATCH 0036/1789] New translations starting-your-subgraph.mdx (Italian) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/it/subgraphs/developing/creating/starting-your-subgraph.mdx index 6b6247b0ce50..5b0ac052a82d 100644 --- a/website/src/pages/it/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/it/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## Panoramica -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. 
[Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| Versione | Note di rilascio | +| :------: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 0e7643270063c3889bc35019bbea951af6f3e4a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:36 -0500 Subject: [PATCH 0037/1789] New translations starting-your-subgraph.mdx (Japanese) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/ja/subgraphs/developing/creating/starting-your-subgraph.mdx index c2dcb7ad1d68..3c40e48ef42d 100644 --- a/website/src/pages/ja/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/ja/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## 概要 -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. 
+When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| バージョン | リリースノート | +| :---: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From 3334e4e1596f3091cbf5f3fb942ac9f4a70116ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:37 -0500 Subject: [PATCH 0038/1789] New translations starting-your-subgraph.mdx (Korean) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/ko/subgraphs/developing/creating/starting-your-subgraph.mdx index 4823231d9a40..4931e6b1fd34 100644 --- a/website/src/pages/ko/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/ko/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## Overview -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From a039960eb37cefd899ae8173fffe9e6f8b4ad8a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:38 -0500 Subject: [PATCH 0039/1789] New translations starting-your-subgraph.mdx (Dutch) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/nl/subgraphs/developing/creating/starting-your-subgraph.mdx index 4823231d9a40..4931e6b1fd34 100644 --- a/website/src/pages/nl/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/nl/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## Overview -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. 
[Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From e93d3be3550587e83a67abfb561812140f3323fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:39 -0500 Subject: [PATCH 0040/1789] New translations starting-your-subgraph.mdx (Polish) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/pl/subgraphs/developing/creating/starting-your-subgraph.mdx index 4823231d9a40..4931e6b1fd34 100644 --- a/website/src/pages/pl/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/pl/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## Overview -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. 
[Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 46e98eb8f6d883b45353f9bf2f30d1d387c49e8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:40 -0500 Subject: [PATCH 0041/1789] New translations starting-your-subgraph.mdx (Portuguese) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/pt/subgraphs/developing/creating/starting-your-subgraph.mdx index 1b70a2ec98ad..01c6e53f2822 100644 --- a/website/src/pages/pt/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/pt/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## Visão geral -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. 
+When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| Versão | Notas de atualização | +| :----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From 68216dd3e137e830f260cc10898ddc8ad0e89284 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:41 -0500 Subject: [PATCH 0042/1789] New translations starting-your-subgraph.mdx (Russian) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/ru/subgraphs/developing/creating/starting-your-subgraph.mdx index 8136fb559cff..60fcbd1a8dd9 100644 --- a/website/src/pages/ru/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/ru/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## Обзор -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| Версия | Примечания к релизу | +| :----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 9e2d933ad2336dcf1446278a550ddf44ffdc95d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:42 -0500 Subject: [PATCH 0043/1789] New translations starting-your-subgraph.mdx (Swedish) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/sv/subgraphs/developing/creating/starting-your-subgraph.mdx index 9f06ce8fcd1d..3c7846394f04 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## Översikt -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. 
[Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| Version | Versionsanteckningar | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 89066f8fd4170046890127943c74780de547b587 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:43 -0500 Subject: [PATCH 0044/1789] New translations starting-your-subgraph.mdx (Turkish) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/tr/subgraphs/developing/creating/starting-your-subgraph.mdx index c10f6facbb0d..a2c3694d4261 100644 --- a/website/src/pages/tr/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/tr/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## Genel Bakış -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. 
[Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| Sürüm | Sürüm Notları | +| :---: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From f7df466e1413c1f17a742c0e0794cd50340d28f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:44 -0500 Subject: [PATCH 0045/1789] New translations starting-your-subgraph.mdx (Ukrainian) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/uk/subgraphs/developing/creating/starting-your-subgraph.mdx index 4823231d9a40..4931e6b1fd34 100644 --- a/website/src/pages/uk/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/uk/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## Overview -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. 
+When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From 99a9f69a361b6155fa7cd521f060ca3ac76bfeaa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:45 -0500 Subject: [PATCH 0046/1789] New translations starting-your-subgraph.mdx (Chinese Simplified) --- .../creating/starting-your-subgraph.mdx | 26 ++++++++++++++----- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/zh/subgraphs/developing/creating/starting-your-subgraph.mdx index d00c872abc59..8ccc637892a4 100644 --- a/website/src/pages/zh/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/zh/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -2,22 +2,34 @@ title: Starting Your Subgraph --- -## 概述 +## Overview -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| 版本 | Release 说明 | +| :---: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 79eb86540f5d8c93fc2cbb38b337d1f73b34f004 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:46 -0500 Subject: [PATCH 0047/1789] New translations starting-your-subgraph.mdx (Urdu (Pakistan)) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/ur/subgraphs/developing/creating/starting-your-subgraph.mdx index 3f0d9b8cde40..6361010625cb 100644 --- a/website/src/pages/ur/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/ur/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## جائزہ -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. 
[Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| ورزن | جاری کردہ نوٹس | +| :---: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From f90b52206a4446722eff3a483c9ff22cf0da50c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:47 -0500 Subject: [PATCH 0048/1789] New translations starting-your-subgraph.mdx (Vietnamese) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/vi/subgraphs/developing/creating/starting-your-subgraph.mdx index f7427e79c81a..ccca7d43804a 100644 --- a/website/src/pages/vi/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/vi/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## Tổng quan -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. 
[Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| Phiên bản | Ghi chú phát hành | +| :-------: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 0181647938ebdede321ef3ec6493060caa094b93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:48 -0500 Subject: [PATCH 0049/1789] New translations starting-your-subgraph.mdx (Marathi) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/mr/subgraphs/developing/creating/starting-your-subgraph.mdx index 946093ef308b..8b40bdfde4fc 100644 --- a/website/src/pages/mr/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/mr/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## सविश्लेषण -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. 
+When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| आवृत्ती | रिलीझ नोट्स | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From 854ff8a43950d3856851921cac06f1029a91d0ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:49 -0500 Subject: [PATCH 0050/1789] New translations starting-your-subgraph.mdx (Hindi) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/hi/subgraphs/developing/creating/starting-your-subgraph.mdx index a162f802cf9c..829ba4c10444 100644 --- a/website/src/pages/hi/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/hi/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,20 +4,32 @@ title: Starting Your Subgraph ## अवलोकन -ग्राफ़ में पहले से ही हजारों सबग्राफ उपलब्ध हैं, जिन्हें क्वेरी के लिए उपयोग किया जा सकता है, तो The Graph Explorer(https://thegraph.com/explorer) को चेक करें और ऐसा कोई Subgraph ढूंढें जो पहले से आपकी ज़रूरतों से मेल खाता हो। +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. -जब आप एक [सबग्राफ](/subgraphs/developing/subgraphs/)बनाते हैं, तो आप एक कस्टम ओपन API बनाते हैं जो ब्लॉकचेन से डेटा निकालता है, उसे प्रोसेस करता है, स्टोर करता है और इसे GraphQL के माध्यम से क्वेरी करना आसान बनाता है। +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. ### Start Building -Start the process and build a subgraph that matches your needs: +Start the process and build a Subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component 3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 805a0da5d816ee2461169dbd0cc7942a811b9823 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:50 -0500 Subject: [PATCH 0051/1789] New translations starting-your-subgraph.mdx (Swahili) --- .../creating/starting-your-subgraph.mdx | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/developing/creating/starting-your-subgraph.mdx diff --git a/website/src/pages/sw/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/sw/subgraphs/developing/creating/starting-your-subgraph.mdx new file mode 100644 index 000000000000..4931e6b1fd34 --- /dev/null +++ b/website/src/pages/sw/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -0,0 +1,35 @@ +--- +title: Starting Your Subgraph +--- + +## Overview + +The Graph is home to thousands of Subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. + +When you create a [Subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. + +Subgraph development ranges from simple scaffold Subgraphs to advanced, specifically tailored Subgraphs. + +### Start Building + +Start the process and build a Subgraph that matches your needs: + +1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a Subgraph's key component +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings +5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your Subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). + +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From e285c0e942e27f7c15d581a4e04e398527af958c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:51 -0500 Subject: [PATCH 0052/1789] New translations overview.mdx (Romanian) --- website/src/pages/ro/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/ro/indexing/overview.mdx b/website/src/pages/ro/indexing/overview.mdx index 914b04e0bf47..8223b3cd348f 100644 --- a/website/src/pages/ro/indexing/overview.mdx +++ b/website/src/pages/ro/indexing/overview.mdx @@ -7,7 +7,7 @@ Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) i GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. -Indexers select subgraphs to index based on the subgraph’s curation signal, where Curators stake GRT in order to indicate which subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their subgraphs and set preferences for query fee pricing. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. ## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. 
**An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? 
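To make the `queryFeeCut` / `indexingRewardCut` split from the previous answer concrete, here is a minimal sketch of the arithmetic when an allocation closes. It is illustrative only: the parts-per-million (PPM) representation and the helper function are assumptions added for clarity, not the protocol contract interface.

```typescript
// Minimal sketch: split indexing rewards between an Indexer and its Delegators.
// Assumption: the cut is tracked in parts-per-million (PPM), so 950_000 PPM = 95%.

const MAX_PPM = 1_000_000n;

interface RewardSplit {
  indexerShare: bigint; // portion kept by the Indexer
  delegatorShare: bigint; // portion shared among Delegators
}

function splitIndexingRewards(totalRewards: bigint, indexingRewardCutPpm: bigint): RewardSplit {
  if (indexingRewardCutPpm < 0n || indexingRewardCutPpm > MAX_PPM) {
    throw new Error("indexingRewardCut must be between 0 and 1,000,000 PPM");
  }
  const indexerShare = (totalRewards * indexingRewardCutPpm) / MAX_PPM;
  return { indexerShare, delegatorShare: totalRewards - indexerShare };
}

// Example matching the text: with a 95% cut, 1,000 GRT of rewards (18 decimals)
// leaves 950 GRT to the Indexer and 50 GRT to be shared among Delegators.
const oneGRT = 10n ** 18n;
const { indexerShare, delegatorShare } = splitIndexingRewards(1_000n * oneGRT, 950_000n);
console.log(indexerShare / oneGRT, delegatorShare / oneGRT); // 950n 50n
```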
-Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. - **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. -- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 949d7baec8669e958a3aacc2e596e92ac0ff5f53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:53 -0500 Subject: [PATCH 0053/1789] New translations overview.mdx (French) --- website/src/pages/fr/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/fr/indexing/overview.mdx b/website/src/pages/fr/indexing/overview.mdx index aedc3415a442..b692292c9d93 100644 --- a/website/src/pages/fr/indexing/overview.mdx +++ b/website/src/pages/fr/indexing/overview.mdx @@ -7,7 +7,7 @@ Les indexeurs sont des opérateurs de nœuds dans The Graph Network qui mettent Le GRT intégré au protocole est soumis à une période de décongélation et peut être réduit si les indexeurs sont malveillants et fournissent des données incorrectes aux applications ou s'ils indexent de manière incorrecte. Les indexeurs gagnent également des récompenses pour la participation déléguée des délégués, afin de contribuer au réseau. -Les indexeurs sélectionnent les subgraphs à indexer en fonction du signal de curation du subgraph, où les curateurs misent du GRT afin d'indiquer quels subgraphs sont de haute qualité et doivent être priorisés. Les consommateurs (par exemple les applications) peuvent également définir les paramètres pour lesquels les indexeurs traitent les requêtes pour leurs subgraphs et définir les préférences pour la tarification des frais de requête. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. 
applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. ## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. 
Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. 
- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. -- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Nœud de The Graph -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### Nœud de The Graph -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 3806b909ae47f1638eae7ab8576cfa212b2f1fd8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:54 -0500 Subject: [PATCH 0054/1789] New translations overview.mdx (Spanish) --- website/src/pages/es/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/es/indexing/overview.mdx b/website/src/pages/es/indexing/overview.mdx index 43b74287044a..ae07d8ec3c7c 100644 --- a/website/src/pages/es/indexing/overview.mdx +++ b/website/src/pages/es/indexing/overview.mdx @@ -7,7 +7,7 @@ Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) i Los GRT que se depositan en stake en el protocolo está sujeto a un periodo de desbloqueo y puede incurrir en slashing (ser reducidos) si los Indexadores son maliciosos y sirven datos incorrectos a las aplicaciones o si indexan incorrectamente. Los Indexadores también obtienen recompensas por stake delegados de los Delegadores, para contribuir a la red. -Los Indexadores seleccionan subgrafos para indexar basados en la señal de curación del subgrafo, donde los Curadores realizan stake de sus GRT para indicar qué subgrafos son de mejor calidad y deben tener prioridad para ser indexados. Los consumidores (por ejemplo, aplicaciones, clientes) también pueden establecer parámetros para los cuales los Indexadores procesan consultas para sus subgrafos y establecen preferencias para el precio asignado a cada consulta. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. 
Consumers (eg. applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. ## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. 
Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. 
- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. -- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From b89d80910baa8bf94573ae4895b5c3647cdee8e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:55 -0500 Subject: [PATCH 0055/1789] New translations overview.mdx (Arabic) --- website/src/pages/ar/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/ar/indexing/overview.mdx b/website/src/pages/ar/indexing/overview.mdx index 3bfd1cc210c3..e8225e42b0a8 100644 --- a/website/src/pages/ar/indexing/overview.mdx +++ b/website/src/pages/ar/indexing/overview.mdx @@ -7,7 +7,7 @@ Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) i GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. -يختار المفهرسون subgraphs للقيام بالفهرسة بناء على إشارة تنسيق subgraphs ، حيث أن المنسقون يقومون ب staking ل GRT وذلك للإشارة ل Subgraphs عالية الجودة. يمكن أيضا للعملاء (مثل التطبيقات) تعيين بارامترات حيث يقوم المفهرسون بمعالجة الاستعلامات ل Subgraphs وتسعير رسوم الاستعلام. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. ## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. 
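For illustration, both of these signals can be read from the same network Subgraph that is queried for allocations later in this document. The sketch below assumes the `subgraphDeployments` entity and its `signalledTokens`, `stakedTokens`, and `queryFeesAmount` fields as exposed by the network Subgraph schema; the exact names may differ in the deployed version.

```graphql
# Sketch (assumed field names): list the most heavily curated Subgraph
# deployments together with the stake already allocated to each and the
# query fees collected so far.
query curationSignalByDeployment {
  subgraphDeployments(first: 10, orderBy: signalledTokens, orderDirection: desc) {
    id
    signalledTokens
    stakedTokens
    queryFeesAmount
  }
}
```

A deployment with high signal but comparatively little allocated stake is the kind of candidate that the selection metrics discussed further down this page are meant to surface.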
**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. 
Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. - **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. 
-- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From b34f97966c676b6096a94e6f4624dea96286aaf6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:57 -0500 Subject: [PATCH 0056/1789] New translations overview.mdx (Czech) --- website/src/pages/cs/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/cs/indexing/overview.mdx b/website/src/pages/cs/indexing/overview.mdx index 52eda54899f1..8bdd81ecfb9d 100644 --- a/website/src/pages/cs/indexing/overview.mdx +++ b/website/src/pages/cs/indexing/overview.mdx @@ -7,7 +7,7 @@ Indexery jsou operátoři uzlů v síti Graf, kteří sázejí graf tokeny (GRT) GRT, který je v protokolu založen, podléhá období rozmrazování a může být zkrácen, pokud jsou indexátory škodlivé a poskytují aplikacím nesprávná data nebo pokud indexují nesprávně. Indexátoři také získávají odměny za delegované sázky od delegátů, aby přispěli do sítě. -Indexátory vybírají podgrafy k indexování na základě signálu kurátorů podgrafů, přičemž kurátoři sázejí na GRT, aby určili, které podgrafy jsou vysoce kvalitní a měly by být upřednostněny. Spotřebitelé (např. aplikace) mohou také nastavit parametry, podle kterých indexátoři zpracovávají dotazy pro jejich podgrafy, a nastavit preference pro stanovení ceny poplatků za dotazy. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. 
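As a complementary sketch, the same network Subgraph also exposes the parameters each Indexer publishes for sharing query fees and rewards (the `queryFeeCut` and `indexingRewardCut` delegation parameters covered in the FAQ below). The field names used here are assumptions drawn from the network Subgraph schema and may not match the deployed version exactly.

```graphql
# Sketch (assumed field names): inspect the largest Indexers together with
# the reward and fee cuts they have published onchain.
query indexerDelegationParameters {
  indexers(first: 5, orderBy: stakedTokens, orderDirection: desc) {
    id
    stakedTokens
    delegatedTokens
    indexingRewardCut
    queryFeeCut
  }
}
```

Comparing these cuts across Indexers is also how Delegators typically decide where to delegate, which is why the delegation parameter settings shown later in this page matter for attracting stake.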
## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. 
Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. - **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. 
-- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Uzel Graf -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### Uzel Graf -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
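As an illustrative sketch of how such a rule might be applied with the `graph indexer rules set` command described above (the deployment ID below is a hypothetical placeholder and the values are examples, not recommendations):

```bash
# Mirror the example above: index any deployment with more than 5 GRT of stake allocated to it.
graph indexer rules set global minStake 5

# Hypothetical deployment ID — always index this specific deployment,
# the same effect `graph indexer rules start` is described as having above.
graph indexer rules set QmExampleDeploymentId decisionBasis always
```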
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 0d64c7f85a647fc5c3b53f5a77a5bacd9dd20ac7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:58 -0500 Subject: [PATCH 0057/1789] New translations overview.mdx (German) --- website/src/pages/de/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/de/indexing/overview.mdx b/website/src/pages/de/indexing/overview.mdx index 05530cbff93a..78469db0d86f 100644 --- a/website/src/pages/de/indexing/overview.mdx +++ b/website/src/pages/de/indexing/overview.mdx @@ -7,7 +7,7 @@ Indexer sind Knotenbetreiber im Graph Network, die Graph Tokens (GRT) einsetzen, GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. -Indexers select subgraphs to index based on the subgraph’s curation signal, where Curators stake GRT in order to indicate which subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their subgraphs and set preferences for query fee pricing. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. 
## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. 
Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. - **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. 
-- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
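As a further sketch (the deployment IDs and values below are hypothetical placeholders), per-deployment thresholds and the `never` decision basis described above might be set as follows:

```bash
# Hypothetical deployment ID — index it only if at least 1000 GRT of curation signal is on it
# (threshold semantics as described above; the value is an arbitrary example).
graph indexer rules set QmSampleDeploymentA minSignal 1000 decisionBasis rules

# Hypothetical deployment ID — stop indexing it and skip it in future indexing decisions
# (sets its decisionBasis to never, as described above).
graph indexer rules stop QmSampleDeploymentB
```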
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 1445215980490fcb5a1ef08b45824be0f8492399 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:08:59 -0500 Subject: [PATCH 0058/1789] New translations overview.mdx (Italian) --- website/src/pages/it/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/it/indexing/overview.mdx b/website/src/pages/it/indexing/overview.mdx index 7a4a5525e2d0..5416dae65dfe 100644 --- a/website/src/pages/it/indexing/overview.mdx +++ b/website/src/pages/it/indexing/overview.mdx @@ -7,7 +7,7 @@ Gli Indexer sono operatori di nodi di The Graph Network che fanno staking di Gra Il GRT che viene fatto staking nel protocollo è soggetto a un periodo di scongelamento e può essere ridotto se gli Indexer sono malintenzionati e servono dati errati alle applicazioni o se indicizzano in modo errato. Gli Indexer guadagnano anche ricompense per le stake delegate dai Delegator, per contribuire alla rete. -Gli Indexer selezionano i subgraph da indicizzare in base al segnale di curation del subgraph, dove i Curator fanno staking di GRT per indicare quali subgraph sono di alta qualità e dovrebbero essere prioritari. I consumatori (ad esempio, le applicazioni) possono anche impostare i parametri per cui gli Indexer elaborano le query per i loro subgraph e stabilire le preferenze per le tariffe di query. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. 
applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. ## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. 
Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. 
- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. -- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
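To make these thresholds concrete, the sketch below applies them through the Indexer CLI commands listed above. It assumes the global rule is addressed by the `global` identifier; the deployment ID is a placeholder and the values are illustrative, not recommendations.

```bash
# Placeholder deployment ID, for illustration only
DEPLOYMENT=QmPlaceholderDeploymentId

# Global default: index any deployment with more than 5 GRT of stake allocated to it
graph indexer rules set global minStake 5

# Always index this one deployment regardless of thresholds
# (rules start sets its decisionBasis to always)
graph indexer rules start $DEPLOYMENT
```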
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 12ee7912dd2f4ef5183c7447032919c046493628 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:00 -0500 Subject: [PATCH 0059/1789] New translations overview.mdx (Japanese) --- website/src/pages/ja/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/ja/indexing/overview.mdx b/website/src/pages/ja/indexing/overview.mdx index f952fafb882b..3c456e33b782 100644 --- a/website/src/pages/ja/indexing/overview.mdx +++ b/website/src/pages/ja/indexing/overview.mdx @@ -7,7 +7,7 @@ sidebarTitle: 概要 プロトコルにステークされた GRT は解凍期間が設けられており、インデクサーが悪意を持ってアプリケーションに不正なデータを提供したり、不正なインデックスを作成した場合には、スラッシュされる可能性があります。 また、インデクサーはデリゲーターからステークによる委任を受けて、ネットワークに貢献することができます。 -インデクサ − は、サブグラフのキュレーション・シグナルに基づいてインデックスを作成するサブグラフを選択し、キュレーターは、どのサブグラフが高品質で優先されるべきかを示すために GRT をステークします。 消費者(アプリケーションなど)は、インデクサーが自分のサブグラフに対するクエリを処理するパラメータを設定したり、クエリフィーの設定を行うこともできます。 +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. ## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. 
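For orientation, the indexing-reward split described under "How are indexing rewards distributed?" just below can be summarized roughly as follows; the notation here is ours, `issuance` stands for the 3% annual inflation pool, and rounding and timing effects are ignored:

```latex
\text{reward}_{i,s} \;\approx\; \text{issuance}
  \times \frac{\text{signal}_{s}}{\sum_{k}\text{signal}_{k}}
  \times \frac{\text{allocation}_{i,s}}{\sum_{j}\text{allocation}_{j,s}}
```

where $s$ is a Subgraph deployment and $i$ is an Indexer with an open allocation on it.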
-**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. 
If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. - **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. -- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### グラフノード -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### グラフノード -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
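A single deployment can also be evaluated against its own thresholds rather than the global defaults. The sketch below is illustrative only: the deployment ID is a placeholder and the values are arbitrary, though `decisionBasis`, `minSignal`, and `minAverageQueryFees` are the rule fields documented here.

```bash
# Placeholder deployment ID, for illustration only
DEPLOYMENT=QmPlaceholderDeploymentId

# Evaluate this deployment against its own thresholds rather than a blanket always/never
graph indexer rules set $DEPLOYMENT decisionBasis rules minSignal 500 minAverageQueryFees 1

# Stop indexing it later; this sets its decisionBasis to never
graph indexer rules stop $DEPLOYMENT
```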
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From d132e072be298538b7c965b63f49735f864d67b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:01 -0500 Subject: [PATCH 0060/1789] New translations overview.mdx (Korean) --- website/src/pages/ko/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/ko/indexing/overview.mdx b/website/src/pages/ko/indexing/overview.mdx index 914b04e0bf47..8223b3cd348f 100644 --- a/website/src/pages/ko/indexing/overview.mdx +++ b/website/src/pages/ko/indexing/overview.mdx @@ -7,7 +7,7 @@ Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) i GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. -Indexers select subgraphs to index based on the subgraph’s curation signal, where Curators stake GRT in order to indicate which subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their subgraphs and set preferences for query fee pricing. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. 
## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. 
Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. - **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. 
-- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
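To make the threshold example above concrete, a global rule along these lines could be set through the Indexer CLI. This is a hedged sketch only: it follows the key/value pattern of the `graph indexer rules set` command described above, but the exact argument order and the units expected for `minStake` should be confirmed against the installed CLI version.

```bash
# Illustrative sketch — confirm argument order and units against your Indexer CLI version.
# Apply a global threshold so that deployments with more than 5 GRT of allocated stake
# are picked up by the agent's rule-based evaluation (decisionBasis "rules").
graph indexer rules set global decisionBasis rules minStake 5

# Check the status reported by the agent afterwards (command shown earlier in this guide).
graph indexer status
```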
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 9dd4e1d3787cfc97e5d824c5a45ee4660e1af336 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:03 -0500 Subject: [PATCH 0061/1789] New translations overview.mdx (Dutch) --- website/src/pages/nl/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/nl/indexing/overview.mdx b/website/src/pages/nl/indexing/overview.mdx index f797c80855e5..287dbcb9b4fb 100644 --- a/website/src/pages/nl/indexing/overview.mdx +++ b/website/src/pages/nl/indexing/overview.mdx @@ -7,7 +7,7 @@ Indexeers zijn node-operators in The Graph Netwerk die Graph Tokens (GRT) inzett GRT dat in het protocol wordt ingezet, is onderheven aan een ontdooiperiode en kan worden geslashed als Indexers schadelijke acties ondernemen, onjuiste data aan applicaties leveren of als ze onjuist indexeren. Indexers verdienen ook beloningen voor gedelegeerde inzet van Delegators om te contributeren aan het netwerk. -Indexeerders selecteren subgraphs om te indexeren op basis van het curatiesignaal van de subgraph, waar Curatoren GRT inzetten om aan te geven welke subgraphs van hoge kwaliteit zijn en prioriteit moeten krijgen. Consumenten (bijv. applicaties) kunnen ook parameters instellen voor welke Indexeerders queries voor hun subgraphs verwerken en voorkeuren instellen voor de prijs van querykosten. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. 
applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. ## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. 
Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. 
- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. -- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres (memory in GBs) | Postgres (disk in TBs) | VMs (CPUs) | VMs (memory in GBs) |
-| --- | :-: | :-: | :-: | :-: | :-: |
-| Small | 4 | 8 | 1 | 4 | 16 |
-| Standard | 8 | 30 | 1 | 12 | 48 |
-| Medium | 16 | 64 | 2 | 32 | 64 |
-| Large | 72 | 468 | 3.5 | 48 | 184 |
+| Setup | Postgres (CPUs) | Postgres (memory in GBs) | Postgres (disk in TBs) | VMs (CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/... /subgraphs/name/.../... | \--http-port | - |
-| 8001 | GraphQL WS (for subgraph subscriptions) | /subgraphs/id/... /subgraphs/name/.../... | \--ws-port | - |
-| 8020 | JSON-RPC (for managing deployments) | / | \--admin-port | - |
-| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
-| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
+| 8000 | GraphQL HTTP server (for Subgraph queries) | /subgraphs/id/... /subgraphs/name/.../... | \--http-port | - |
+| 8001 | GraphQL WS (for Subgraph subscriptions) | /subgraphs/id/... /subgraphs/name/.../... | \--ws-port | - |
+| 8020 | JSON-RPC (for managing deployments) | / | \--admin-port | - |
+| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
+| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |
#### Indexer Service
-| Port | Purpose | Routes | CLI Argument | Environment Variable |
-| --- | --- | --- | --- | --- |
-| 7600 | GraphQL HTTP server (for paid subgraph queries) | /subgraphs/id/... /status /channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` |
-| 7300 | Prometheus metrics | /metrics | \--metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- |
+| 7600 | GraphQL HTTP server (for paid Subgraph queries) | /subgraphs/id/... /status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
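Beyond the thresholds, individual deployments can also be pinned or excluded regardless of what the rule-based evaluation decides. A brief sketch using the `rules start` and `rules stop` commands listed earlier follows; the deployment ID below is a placeholder, not a real deployment hash.

```bash
# Placeholder deployment ID — substitute a real IPFS deployment hash (Qm...).
DEPLOYMENT=QmExampleDeploymentHashPlaceholder

# Always index this deployment (sets its decisionBasis to "always").
graph indexer rules start $DEPLOYMENT

# Or skip it entirely when deciding what to index (sets its decisionBasis to "never").
graph indexer rules stop $DEPLOYMENT
```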
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 03acc34b0c69989f1b0419756c4ad4a0385b14c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:04 -0500 Subject: [PATCH 0062/1789] New translations overview.mdx (Polish) --- website/src/pages/pl/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/pl/indexing/overview.mdx b/website/src/pages/pl/indexing/overview.mdx index 914b04e0bf47..8223b3cd348f 100644 --- a/website/src/pages/pl/indexing/overview.mdx +++ b/website/src/pages/pl/indexing/overview.mdx @@ -7,7 +7,7 @@ Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) i GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. -Indexers select subgraphs to index based on the subgraph’s curation signal, where Curators stake GRT in order to indicate which subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their subgraphs and set preferences for query fee pricing. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. 
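Because curation signal is the main input to that selection process, an Indexer may want to inspect it directly before allocating. The sketch below shows one way this could be done against the network Subgraph referenced later in this document; the endpoint is a placeholder, and the field names (`subgraphDeployments`, `signalledTokens`, `stakedTokens`) are assumptions that should be verified against the actual schema.

```bash
# Placeholder endpoint — substitute the query URL you use for the network Subgraph.
NETWORK_SUBGRAPH_ENDPOINT="https://example.com/network-subgraph"

# Assumed field names; verify against the network Subgraph schema before relying on this.
curl -s -X POST "$NETWORK_SUBGRAPH_ENDPOINT" \
  -H 'Content-Type: application/json' \
  -d '{"query":"{ subgraphDeployments(first: 5, orderBy: signalledTokens, orderDirection: desc) { id signalledTokens stakedTokens } }"}'
```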
## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. 
Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. - **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. 
-- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres (memory in GBs) | Postgres (disk in TBs) | VMs (CPUs) | VMs (memory in GBs) |
-| --- | :-: | :-: | :-: | :-: | :-: |
-| Small | 4 | 8 | 1 | 4 | 16 |
-| Standard | 8 | 30 | 1 | 12 | 48 |
-| Medium | 16 | 64 | 2 | 32 | 64 |
-| Large | 72 | 468 | 3.5 | 48 | 184 |
+| Setup | Postgres (CPUs) | Postgres (memory in GBs) | Postgres (disk in TBs) | VMs (CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/... /subgraphs/name/.../... | \--http-port | - |
-| 8001 | GraphQL WS (for subgraph subscriptions) | /subgraphs/id/... /subgraphs/name/.../... | \--ws-port | - |
-| 8020 | JSON-RPC (for managing deployments) | / | \--admin-port | - |
-| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
-| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
+| 8000 | GraphQL HTTP server (for Subgraph queries) | /subgraphs/id/... /subgraphs/name/.../... | \--http-port | - |
+| 8001 | GraphQL WS (for Subgraph subscriptions) | /subgraphs/id/... /subgraphs/name/.../... | \--ws-port | - |
+| 8020 | JSON-RPC (for managing deployments) | / | \--admin-port | - |
+| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
+| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |
#### Indexer Service
-| Port | Purpose | Routes | CLI Argument | Environment Variable |
-| --- | --- | --- | --- | --- |
-| 7600 | GraphQL HTTP server (for paid subgraph queries) | /subgraphs/id/... /status /channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` |
-| 7300 | Prometheus metrics | /metrics | \--metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- |
+| 7600 | GraphQL HTTP server (for paid Subgraph queries) | /subgraphs/id/... /status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
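The same threshold fields can also be combined on a per-deployment rule instead of the global default. A hedged sketch follows; the deployment ID is a placeholder and the exact key/value syntax should be checked against the installed Indexer CLI.

```bash
# Placeholder deployment ID — replace with a real deployment hash (Qm...).
DEPLOYMENT=QmExampleDeploymentHashPlaceholder

# Index this deployment only once it clears both thresholds below
# (field names taken from the list of threshold rules above).
graph indexer rules set $DEPLOYMENT decisionBasis rules minSignal 500 minAverageQueryFees 1
```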
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 6159f4372724cea29bf5d30bcb50c889950982d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:05 -0500 Subject: [PATCH 0063/1789] New translations overview.mdx (Portuguese) --- website/src/pages/pt/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/pt/indexing/overview.mdx b/website/src/pages/pt/indexing/overview.mdx index adf55ea75a43..c60bbe82be74 100644 --- a/website/src/pages/pt/indexing/overview.mdx +++ b/website/src/pages/pt/indexing/overview.mdx @@ -7,7 +7,7 @@ Indexadores são operadores de nodes na Graph Network que fazem staking em Graph O GRT em staking no protocolo é sujeito a um período de degelo, e pode passar por slashing (recolhimento de fundos) se algum Indexador maliciosamente servir dados incorretos para aplicativos ou indexar incorretamente. Os Indexadores também recebem recompensas dos Delegantes por stake delegado, para contribuir à rede. -Indexadores selecionam subgraphs para indexar com base no sinal de curadoria do subgraph, onde Curadores depositam GRT em staking para indicar quais subgraphs são de qualidade alta e devem ser priorizados. Consumidores (por ex., aplicativos) também podem configurar parâmetros para os quais Indexadores processam queries para seus subgraphs, além de configurar preferências para o preço das taxas de query. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. 
applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. ## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. 
Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. 
- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. -- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres<br />(memory in GBs) | Postgres<br />(disk in TBs) | VMs<br />(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres<br />(memory in GBs) | Postgres<br />(disk in TBs) | VMs<br />(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS<br />(for subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS<br />(for Subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...<br />/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...<br />/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
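As a quick illustration of the `rules start` and `rules stop` commands listed earlier in this section, the following sketch toggles indexing for a single deployment; the deployment hash is a placeholder and should be replaced with a real IPFS deployment ID.

```bash
# Placeholder deployment ID
DEPLOYMENT=QmPlaceholderDeploymentId

# Sets the rule's decisionBasis to always, so the agent will keep indexing it
graph indexer rules start $DEPLOYMENT

# Sets the rule's decisionBasis to never, so the agent will skip it going forward
graph indexer rules stop $DEPLOYMENT
```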
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 76528faed10b8685adb882087f7d3814e3562d15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:06 -0500 Subject: [PATCH 0064/1789] New translations overview.mdx (Russian) --- website/src/pages/ru/indexing/overview.mdx | 100 ++++++++++----------- 1 file changed, 50 insertions(+), 50 deletions(-) diff --git a/website/src/pages/ru/indexing/overview.mdx b/website/src/pages/ru/indexing/overview.mdx index a1a21b206718..c9af9343abb4 100644 --- a/website/src/pages/ru/indexing/overview.mdx +++ b/website/src/pages/ru/indexing/overview.mdx @@ -5,9 +5,9 @@ sidebarTitle: Обзор Индексаторы — это операторы нод в сети The Graph, которые стейкают токены Graph (GRT) для предоставления услуг индексирования и обработки запросов. Индексаторы получают оплату за запросы и вознаграждение за свои услуги индексирования. Они также получают комиссию за запросы, которая возвращаются в соответствии с экспоненциальной функцией возврата. -Токены GRT, которые застейканы в протоколе, подлежат периоду "оттаивания" и могут быть срезаны, если индексаторы являются вредоносными и передают неверные данные приложениям или если они некорректно осуществляют индексирование. Индексаторы также получают вознаграждение за делегированный стейк от делегаторов, внося свой вклад в работу сети. +GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. 
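A hedged sketch of the offchain syncing recommendation above: a deployment can be kept syncing without an onchain allocation by giving it an `offchain` decision basis, then switched over once it reaches chainhead. The deployment ID is a placeholder, and the `offchain` basis assumes a reasonably recent Indexer agent.

```bash
# Sync the deployment to chainhead first, without allocating onchain
graph indexer rules set QmPlaceholderDeploymentId decisionBasis offchain

# Once synced, hand the decision back to threshold rules (or use always) so the agent allocates
graph indexer rules set QmPlaceholderDeploymentId decisionBasis rules
```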
-Индексаторы выбирают подграфы для индексирования на основе сигналов от кураторов, в которых кураторы стейкают токены GRT, чтобы обозначить, какие подграфы являются высококачественными и заслуживают приоритетного внимания. Потребители (к примеру, приложения) также могут задавать параметры, по которым индексаторы обрабатывают запросы к их подграфам, и устанавливать предпочтения по цене за запрос. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. ## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. 
A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. 
+- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. - **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. -- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres<br />(memory in GBs) | Postgres<br />(disk in TBs) | VMs<br />(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres<br />(memory in GBs) | Postgres<br />(disk in TBs) | VMs<br />(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS<br />(for subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS<br />(for Subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...<br />/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...<br />/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
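For the port-forwarding setup mentioned above, a minimal sketch looks like the following; the pod name, local port, and the agent's management port are placeholders that depend on how the Indexer agent was deployed and configured.

```bash
# Forward the Indexer agent's management API to this machine (placeholder pod name and ports)
kubectl port-forward pod/indexer-agent-0 18000:8000 &

# Point the Indexer CLI at the forwarded endpoint
graph indexer connect http://localhost:18000

# Confirm the CLI can reach the agent
graph indexer status
```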
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 005b02b60e4d098cd423c8886286947728cb008e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:08 -0500 Subject: [PATCH 0065/1789] New translations overview.mdx (Swedish) --- website/src/pages/sv/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/sv/indexing/overview.mdx b/website/src/pages/sv/indexing/overview.mdx index 26ecf1330d60..1224e41ed518 100644 --- a/website/src/pages/sv/indexing/overview.mdx +++ b/website/src/pages/sv/indexing/overview.mdx @@ -7,7 +7,7 @@ Indexerare är nodoperatörer i The Graph Network som satsar Graph Tokens (GRT) GRT som satsas i protokollet är föremål för en tiningperiod och kan drabbas av strykning om indexerare är skadliga och tillhandahåller felaktiga data till applikationer eller om de indexerar felaktigt. Indexerare tjänar också belöningar för delegerat satsning från Delegater, för att bidra till nätverket. -Indexerare väljer subgrafer att indexera baserat på subgrafens kuratersignal, där Curators satsar GRT för att ange vilka subgrafer som är av hög kvalitet och bör prioriteras. Konsumenter (t.ex. applikationer) kan också ställa in parametrar för vilka indexerare som behandlar frågor för deras subgrafer och ange preferenser för pris på frågebetalning. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. 
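The allocation lifecycle above recommends syncing a deployment offchain before opening the allocation onchain. A minimal sketch of that flow with the Indexer CLI, assuming a placeholder deployment ID, could be:

```bash
# Index the deployment offchain first - no onchain allocation is opened yet.
graph indexer rules set QmPLACEHOLDERDEPLOYMENTID decisionBasis offchain

# Once the deployment has caught up to chainhead, let the agent allocate to it.
graph indexer rules set QmPLACEHOLDERDEPLOYMENTID decisionBasis always
```

Under manual or oversight allocation management, the second step would instead be queued as an `allocate` action and approved before execution.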
## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. 
Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. - **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. 
-- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Graf Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### Graf Node -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
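Under the manual and oversight modes described above, allocation changes go through the actions queue and must be explicitly approved before they are executed. A hypothetical queue-and-approve round trip (deployment ID, allocation ID, amount, and action numbers are all placeholders; the required parameters per action type are listed further below) might look like:

```bash
# Queue an allocation of 5000 GRT to one deployment and close an existing allocation.
graph indexer actions queue allocate QmPLACEHOLDERDEPLOYMENTID 5000
graph indexer actions queue unallocate QmPLACEHOLDERDEPLOYMENTID 0xPLACEHOLDERALLOCATIONID

# Review the queue, approve the actions you want, then execute everything approved.
graph indexer actions get --status queued
graph indexer actions approve 1 2
graph indexer actions execute approve
```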
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From a4263ac1bc9751f2d72aaa200d00d6f9bf7adada Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:09 -0500 Subject: [PATCH 0066/1789] New translations overview.mdx (Turkish) --- website/src/pages/tr/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/tr/indexing/overview.mdx b/website/src/pages/tr/indexing/overview.mdx index 0de4a3fcb961..e64ec2f6effc 100644 --- a/website/src/pages/tr/indexing/overview.mdx +++ b/website/src/pages/tr/indexing/overview.mdx @@ -7,7 +7,7 @@ sidebarTitle: Genel Bakış GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. -Indexers select subgraphs to index based on the subgraph’s curation signal, where Curators stake GRT in order to indicate which subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their subgraphs and set preferences for query fee pricing. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. ## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. 
**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. 
Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. - **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. 
-- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
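Step 8 of the staking walkthrough that follows expresses `queryFeeCut` and `indexingRewardCut` in parts per million when calling `setDelegationParameters()`. As a quick illustrative conversion for the 95% / 60% example used there (simple shell arithmetic, nothing protocol-specific):

```bash
# Percent -> parts per million for the delegation parameter example.
echo $(( 95 * 1000000 / 100 ))   # queryFeeCut: 95% of query rebates to the Indexer -> 950000 ppm
echo $(( 60 * 1000000 / 100 ))   # indexingRewardCut: 60% of indexing rewards to the Indexer -> 600000 ppm
```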
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 3629f13b0e78d3934be6ea75591a70e3c28d8e9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:10 -0500 Subject: [PATCH 0067/1789] New translations overview.mdx (Ukrainian) --- website/src/pages/uk/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/uk/indexing/overview.mdx b/website/src/pages/uk/indexing/overview.mdx index 9bb1d1febb33..7da34e6558ea 100644 --- a/website/src/pages/uk/indexing/overview.mdx +++ b/website/src/pages/uk/indexing/overview.mdx @@ -7,7 +7,7 @@ sidebarTitle: Overview GRT, які застейкані в протоколі, підлягають періоду "розблокування" і можуть бути порізані (slashing), якщо індексатори є шкідливими та надають некоректні дані додаткам або якщо вони неправильно індексують. Індексатори також отримують винагороду за стейк, який вони отримують від делегатів, щоб зробити свій внесок у розвиток мережі. -Індексатори вибирають підграфи для індексування на основі сигналу від кураторів, де куратори стейкають GRT, щоб вказати, які підграфи є якісними та мають бути пріоритетними. Споживачі (наприклад, додатки) також можуть задавати параметри, за якими індексатори обробляють запити до їхніх підграфів, і встановлювати налаштування щодо оплати за запити. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. 
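The FAQ that follows explains that indexing rewards are distributed across Subgraphs in proportion to their share of total curation signal, and then to Indexers in proportion to their allocated stake on each Subgraph. A toy calculation with invented numbers (illustration only, before the Indexer/Delegator split):

```bash
# Toy numbers only - not protocol values.
TOTAL_REWARDS=1000000      # indexing rewards issued over some period, in GRT
SIGNAL_SHARE_PCT=10        # this Subgraph holds 10% of all curation signal
STAKE_SHARE_PCT=25         # this Indexer holds 25% of the stake allocated to the Subgraph

SUBGRAPH_REWARDS=$(( TOTAL_REWARDS * SIGNAL_SHARE_PCT / 100 ))   # 100000 GRT to the Subgraph
INDEXER_REWARDS=$(( SUBGRAPH_REWARDS * STAKE_SHARE_PCT / 100 ))  # 25000 GRT to this Indexer
echo "Subgraph share: $SUBGRAPH_REWARDS GRT, Indexer share: $INDEXER_REWARDS GRT"
```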
## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. 
Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. - **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. 
-- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic.
+- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second.
+- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic.

-| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) |
-| --- | :-: | :-: | :-: | :-: | :-: |
-| Small | 4 | 8 | 1 | 4 | 16 |
-| Standard | 8 | 30 | 1 | 12 | 48 |
-| Medium | 16 | 64 | 2 | 32 | 64 |
-| Large | 72 | 468 | 3.5 | 48 | 184 |
+| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) |
+| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: |
+| Small | 4 | 8 | 1 | 4 | 16 |
+| Standard | 8 | 30 | 1 | 12 | 48 |
+| Medium | 16 | 64 | 2 | 32 | 64 |
+| Large | 72 | 468 | 3.5 | 48 | 184 |

### What are some basic security precautions an Indexer should take?

@@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making

## Infrastructure

-At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network.
+At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network.

-- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions.
+- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions.

-- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API.
+- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API.

-- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com.
+- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com.

- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway.

-- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations.
+- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations.

- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server.

@@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer

#### Graph Node

-| Port | Purpose | Routes | CLI Argument | Environment Variable |
-| --- | --- | --- | --- | --- |
-| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - |
-| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - |
-| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - |
-| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
-| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
+| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - |
+| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - |
+| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - |
+| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
+| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |

#### Indexer Service

-| Port | Purpose | Routes | CLI Argument | Environment Variable |
-| --- | --- | --- | --- | --- |
-| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` |
-| 7300 | Prometheus metrics | /metrics | \--metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- |
+| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` |
+| 7300 | Prometheus metrics | /metrics | \--metrics-port | - |

#### Indexer Agent

@@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`.

### Graph Node

-[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint.
+[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint.

#### Getting started from source

@@ -365,9 +365,9 @@ docker-compose up

To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components:

-- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each.
+- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each.

-- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways.
+- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways.

- **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules.

@@ -525,7 +525,7 @@ graph indexer status

#### Indexer management using Indexer CLI

-The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution.
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
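To make the threshold behaviour above concrete, a simplified sketch follows. The type shapes are illustrative stand-ins rather than the actual indexer-agent data model, and only the `always`, `never`, and `rules` decision bases described here are modelled:

```typescript
// Simplified sketch of the rules-based indexing decision described above.
// Interfaces are illustrative stand-ins, not the real indexer-agent types.
type DecisionBasis = "always" | "never" | "rules";

interface IndexingRule {
  decisionBasis: DecisionBasis;
  minStake?: bigint; // GRT allocated to the deployment
  minSignal?: bigint; // curation signal thresholds
  maxSignal?: bigint;
  minAverageQueryFees?: bigint;
}

interface DeploymentStats {
  stake: bigint;
  signal: bigint;
  averageQueryFees: bigint;
}

function shouldIndex(rule: IndexingRule, stats: DeploymentStats): boolean {
  if (rule.decisionBasis === "always") return true;
  if (rule.decisionBasis === "never") return false;
  // decisionBasis === "rules": any satisfied non-null threshold selects the deployment
  if (rule.minStake !== undefined && stats.stake >= rule.minStake) return true;
  if (rule.minSignal !== undefined && stats.signal >= rule.minSignal) return true;
  if (rule.maxSignal !== undefined && stats.signal <= rule.maxSignal) return true;
  if (rule.minAverageQueryFees !== undefined && stats.averageQueryFees >= rule.minAverageQueryFees) return true;
  return false;
}

// Mirrors the example in the text: a global rule with a minStake of 5 GRT (18-decimal units).
const globalRule: IndexingRule = { decisionBasis: "rules", minStake: 5n * 10n ** 18n };
console.log(shouldIndex(globalRule, { stake: 6n * 10n ** 18n, signal: 0n, averageQueryFees: 0n })); // true
```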
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From c64a6233eba1782761f65f98d6dd79aee35c121d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:12 -0500 Subject: [PATCH 0068/1789] New translations overview.mdx (Chinese Simplified) --- website/src/pages/zh/indexing/overview.mdx | 100 ++++++++++----------- 1 file changed, 50 insertions(+), 50 deletions(-) diff --git a/website/src/pages/zh/indexing/overview.mdx b/website/src/pages/zh/indexing/overview.mdx index 33c864e1dc69..0da68c1beb84 100644 --- a/website/src/pages/zh/indexing/overview.mdx +++ b/website/src/pages/zh/indexing/overview.mdx @@ -1,13 +1,13 @@ --- title: 索引概述 -sidebarTitle: 概述 +sidebarTitle: Overview --- 索引人是The Graph 网络中的节点运营商,他们质押 Graph代币(GRT) 以提供索引和查询处理服务。 索引人通过他们的服务赚取查询费和索引奖励。 他们还根据 Cobbs-Douglas 回扣函数从回扣池中赚取收益,该回扣池与所有网络贡献者按他们的工作成比例共享。 抵押在协议中的 GRT 会受到解冻期的影响,如果索引人是恶意的并向应用程序提供不正确的数据或索引不正确,则可能会被削减。 索引人也可以从委托人那里获得委托,为网络做出贡献。 -索引人根据子图的策展信号选择要索引的子图,其中策展人质押 GRT 以指示哪些子图是高质量的并应优先考虑。 消费者(例如应用程序)还可以设置索引人处理其子图查询的参数,并设置查询费用定价的偏好。 +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. ## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. 
Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. 
Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. - **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. 
-- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic.
+- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second.
+- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic.

-| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) |
-| --- | :-: | :-: | :-: | :-: | :-: |
-| Small | 4 | 8 | 1 | 4 | 16 |
-| Standard | 8 | 30 | 1 | 12 | 48 |
-| Medium | 16 | 64 | 2 | 32 | 64 |
-| Large | 72 | 468 | 3.5 | 48 | 184 |
+| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) |
+| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: |
+| Small | 4 | 8 | 1 | 4 | 16 |
+| Standard | 8 | 30 | 1 | 12 | 48 |
+| Medium | 16 | 64 | 2 | 32 | 64 |
+| Large | 72 | 468 | 3.5 | 48 | 184 |

### What are some basic security precautions an Indexer should take?

@@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making

## Infrastructure

-At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network.
+At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network.

-- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions.
+- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions.

-- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API.
+- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API.

-- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com.
+- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com.

- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway.

-- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations.
+- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations.

- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server.

@@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer

#### Graph 节点

-| Port | Purpose | Routes | CLI Argument | Environment Variable |
-| --- | --- | --- | --- | --- |
-| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - |
-| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - |
-| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - |
-| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
-| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
+| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - |
+| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - |
+| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - |
+| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
+| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |

#### Indexer Service

-| Port | Purpose | Routes | CLI Argument | Environment Variable |
-| --- | --- | --- | --- | --- |
-| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` |
-| 7300 | Prometheus metrics | /metrics | \--metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- |
+| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` |
+| 7300 | Prometheus metrics | /metrics | \--metrics-port | - |

#### Indexer Agent

@@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`.

### Graph 节点

-[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint.
+[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint.

#### Getting started from source

@@ -365,9 +365,9 @@ docker-compose up

To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components:

-- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each.
+- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each.

-- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways.
+- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways.

- **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules.

@@ -525,7 +525,7 @@ graph indexer status

#### Indexer management using Indexer CLI

-The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution.
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From aa5cd540085fe2498c8f21841fed26c9554d4768 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:13 -0500 Subject: [PATCH 0069/1789] New translations overview.mdx (Urdu (Pakistan)) --- website/src/pages/ur/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/ur/indexing/overview.mdx b/website/src/pages/ur/indexing/overview.mdx index 40f78c96c399..4de00b96be5f 100644 --- a/website/src/pages/ur/indexing/overview.mdx +++ b/website/src/pages/ur/indexing/overview.mdx @@ -7,7 +7,7 @@ sidebarTitle: جائزہ پروٹوکول میں داؤ پر لگائی گئی GRT پگھلنے کی مدت سے مشروط ہے اور اگر انڈیکسرز بدنیتی پر مبنی ہوں اور ایپلیکیشنز کو غلط ڈیٹا پیش کرتے ہیں یا اگر وہ غلط طریقے سے انڈیکس کرتے ہیں تو اسے کم کیا جا سکتا ہے. انڈیکسرز نیٹ ورک میں حصہ ڈالنے کے لیے ڈیلیگیٹرز کی جانب سے دیے گئے سٹیک کے لیے بھی انعامات حاصل کرتے ہیں. -انڈیکسرز سب گراف کے کیوریشن سگنل کی بنیاد پر انڈیکس کرنے کے لیے سب گرافس کا انتخاب کرتے ہیں, جہاں کیوریٹرز GRT کو سٹیک کرتے ہیں تاکہ یہ ظاہر کیا جا سکے کہ کون سے سب گرافس اعلیٰ معیار کے ہیں اور انہیں ترجیح دی جانی چاہیے. صارفین (مثلاً ایپلی کیشنز) ایسے عوامل کا تعین کر سکتے ہیں جن کے لیے انڈیکسرز اپنے سب گرافس کے لیے کیوریز پر کارروائی کرتے ہیں اور کیوری کی فیس کی قیمتوں کے لیے ترجیحات طے کرتے ہیں. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. 
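A hedged sketch of the pending-rewards check outlined in the FAQ that follows: fetch active allocation IDs from the network subgraph, then call the read-only `getRewards()` on the RewardsManager. The contract address placeholder and the exact `getRewards(address)` signature are assumptions to confirm against the deployed contract:

```typescript
import { Contract, JsonRpcProvider, formatEther } from "ethers";

// Sketch: estimate pending indexing rewards for a set of active allocations.
// REWARDS_MANAGER_ADDRESS and the getRewards(address) signature are assumptions —
// verify them against the deployed RewardsManager contract before relying on this.
const provider = new JsonRpcProvider(process.env.RPC_URL);
const REWARDS_MANAGER_ADDRESS = "<rewards-manager-address>";

const rewardsManager = new Contract(
  REWARDS_MANAGER_ADDRESS,
  ["function getRewards(address allocationID) view returns (uint256)"],
  provider,
);

async function pendingRewards(allocationIds: string[]): Promise<void> {
  for (const id of allocationIds) {
    const rewards: bigint = await rewardsManager.getRewards(id);
    console.log(`allocation ${id}: ~${formatEther(rewards)} GRT pending`);
  }
}

// Allocation IDs would come from the network subgraph query shown in the FAQ.
pendingRewards(["0xAllocationIdFromTheNetworkSubgraph"]).catch(console.error);
```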
## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. 
Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. - **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. 
-- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### گراف نوڈ -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### گراف نوڈ -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
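As a rough illustration of that threshold behaviour, the sketch below picks a deployment as soon as any non-null minimum threshold on a rule is satisfied. The interfaces are simplified stand-ins rather than the indexer-agent's real data model, and only the minimum-style thresholds are shown.

```typescript
// Illustration of the threshold logic described above, not the agent's actual types.
// A deployment is selected when any non-null minimum threshold is exceeded.
// All token amounts are GRT in wei (bigint).

interface ThresholdRule {
  minStake?: bigint;
  minSignal?: bigint;
  minAverageQueryFees?: bigint;
}

interface DeploymentStats {
  stakedTokens: bigint;
  signalledTokens: bigint;
  avgQueryFees: bigint;
}

function meetsThresholds(rule: ThresholdRule, stats: DeploymentStats): boolean {
  if (rule.minStake !== undefined && stats.stakedTokens > rule.minStake) return true;
  if (rule.minSignal !== undefined && stats.signalledTokens > rule.minSignal) return true;
  if (rule.minAverageQueryFees !== undefined && stats.avgQueryFees > rule.minAverageQueryFees) return true;
  return false;
}

// The global-rule example above: a minStake of 5 GRT means any deployment with more
// than 5 GRT of allocated stake gets indexed.
const GRT = 10n ** 18n;
console.log(
  meetsThresholds(
    { minStake: 5n * GRT },
    { stakedTokens: 7n * GRT, signalledTokens: 0n, avgQueryFees: 0n }
  )
); // true
```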
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 7c95acd26d24a300ca8e9ee65dd8d5515bf8f10f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:14 -0500 Subject: [PATCH 0070/1789] New translations overview.mdx (Vietnamese) --- website/src/pages/vi/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/vi/indexing/overview.mdx b/website/src/pages/vi/indexing/overview.mdx index e09a783ede2a..7ff05d355d65 100644 --- a/website/src/pages/vi/indexing/overview.mdx +++ b/website/src/pages/vi/indexing/overview.mdx @@ -7,7 +7,7 @@ Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) i GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. -Indexer chọn các subgraph để index dựa trên tín hiệu curation của subgraph, trong đó Curator stake GRT để chỉ ra subgraph nào có chất lượng cao và cần được ưu tiên. Bên tiêu dùng (ví dụ: ứng dụng) cũng có thể đặt các tham số (parameter) mà Indexer xử lý các truy vấn cho các subgraph của họ và đặt các tùy chọn cho việc định giá phí truy vấn. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. 
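The offchain-syncing guidance above can be checked programmatically before allocating: Graph Node exposes an indexing status API on port 8030 (see the port tables). The sketch below asks it whether a deployment is healthy and at chainhead. The `indexingStatuses` query and its fields follow graph-node's index-node API as commonly documented, but treat the exact shape and the lag tolerance as assumptions to verify against your graph-node version.

```typescript
// Sketch: confirm a deployment is healthy and synced via Graph Node's status API
// (port 8030) before opening the allocation onchain. Query shape is an assumption;
// verify against your graph-node version.

const STATUS_API = "http://localhost:8030/graphql";

async function readyToAllocate(deployment: string, maxLagBlocks = 10): Promise<boolean> {
  const query = `
    query ($ids: [String!]!) {
      indexingStatuses(subgraphs: $ids) {
        synced
        health
        chains {
          latestBlock { number }
          chainHeadBlock { number }
        }
      }
    }`;

  const res = await fetch(STATUS_API, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query, variables: { ids: [deployment] } }),
  });
  const status = (await res.json()).data?.indexingStatuses?.[0];
  if (!status || !status.synced || status.health !== "healthy") return false;

  const chain = status.chains?.[0];
  if (!chain?.latestBlock || !chain?.chainHeadBlock) return false;
  const lag = Number(chain.chainHeadBlock.number) - Number(chain.latestBlock.number);
  return lag <= maxLagBlocks; // arbitrary tolerance for "at chainhead"
}

readyToAllocate("<deployment-ipfs-hash>").then((ok) => console.log(ok ? "allocate" : "keep syncing"));
```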
## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. 
Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. - **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. 
-- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
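One detail that trips people up when tuning rules and delegation together is that the `queryFeeCut` and `indexingRewardCut` parameters described in the FAQ above are expressed in parts per million rather than percent, with the cut being the share retained by the Indexer. The helper below is only a back-of-the-envelope illustration of that conversion and split; the 95% and 60% figures are the ones used in the `setDelegationParameters()` example elsewhere in this page.

```typescript
// Back-of-the-envelope illustration of the queryFeeCut / indexingRewardCut split.
// Both parameters are parts per million (PPM); the cut is the Indexer's share and
// Delegators split the remainder.

const PPM = 1_000_000;

const percentToPPM = (percent: number): number => Math.round((percent / 100) * PPM);

function split(total: number, cutPPM: number): { indexer: number; delegators: number } {
  const indexer = (total * cutPPM) / PPM;
  return { indexer, delegators: total - indexer };
}

console.log(percentToPPM(95));               // 950000
console.log(split(1_000, percentToPPM(95))); // { indexer: 950, delegators: 50 }
console.log(split(1_000, percentToPPM(60))); // { indexer: 600, delegators: 400 }
```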
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From b6bf4dab5cb10b6f6682fc9002b81082fd55901d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:15 -0500 Subject: [PATCH 0071/1789] New translations overview.mdx (Marathi) --- website/src/pages/mr/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/mr/indexing/overview.mdx b/website/src/pages/mr/indexing/overview.mdx index 0113721170dd..caeed8f5e0b2 100644 --- a/website/src/pages/mr/indexing/overview.mdx +++ b/website/src/pages/mr/indexing/overview.mdx @@ -7,7 +7,7 @@ Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) i प्रोटोकॉलमध्ये स्टॅक केलेले GRT वितळण्याच्या कालावधीच्या अधीन आहे आणि जर इंडेक्सर्स दुर्भावनापूर्ण असतील आणि ऍप्लिकेशन्सना चुकीचा डेटा देत असतील किंवा ते चुकीच्या पद्धतीने इंडेक्स करत असतील तर ते कमी केले जाऊ शकतात. इंडेक्सर्स नेटवर्कमध्ये योगदान देण्यासाठी डेलिगेटर्सकडून डेलिगेटेड स्टेकसाठी बक्षिसे देखील मिळवतात. -इंडेक्सर्स सबग्राफच्या क्युरेशन सिग्नलच्या आधारे इंडेक्समध्ये सबग्राफ निवडतात, जिथे क्यूरेटर्स जीआरटी घेतात जेणेकरून कोणते सबग्राफ उच्च-गुणवत्तेचे आहेत आणि त्यांना प्राधान्य दिले पाहिजे. ग्राहक (उदा. ऍप्लिकेशन्स) मापदंड देखील सेट करू शकतात ज्यासाठी इंडेक्सर्स त्यांच्या सबग्राफसाठी क्वेरी प्रक्रिया करतात आणि क्वेरी शुल्क किंमतीसाठी प्राधान्ये सेट करतात. +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. 
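The allocation lifecycle described above reduces to simple epoch arithmetic, sketched below. It assumes an allocation becomes closeable after one epoch and is auto-closed around `maxAllocationEpochs`, taken here as 28 epochs to mirror the text; verify the current protocol value before relying on it.

```typescript
// Sketch of the allocation lifecycle arithmetic: manually closeable after 1 epoch,
// auto-closed by the agent around maxAllocationEpochs (assumed 28, per the text above).

const MAX_ALLOCATION_EPOCHS = 28;

function allocationWindow(createdAtEpoch: number, currentEpoch: number) {
  const age = currentEpoch - createdAtEpoch;
  return {
    canCloseManually: age >= 1,
    epochsUntilAutoClose: Math.max(0, MAX_ALLOCATION_EPOCHS - age),
  };
}

// Opened at epoch 700 and checked at epoch 727: closeable, and one epoch away from the
// automatic close, which makes it a good moment to confirm a valid POI will be available.
console.log(allocationWindow(700, 727)); // { canCloseManually: true, epochsUntilAutoClose: 1 }
```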
## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. 
Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. - **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. 
-- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### आलेख नोड -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### आलेख नोड -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
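As a rough illustration of how these thresholds are applied in practice, the `graph indexer rules set` command listed above can encode them per deployment. The sketch below is not from this guide: it assumes the Indexer CLI is already connected to the agent's Indexer Management API, the deployment ID is a placeholder rather than a real hash, the threshold values are illustrative only, and the `rules get` subcommand is assumed to be available in your Indexer CLI version.

```bash
# Placeholder deployment ID — replace with a real IPFS deployment hash (Qm...).
DEPLOYMENT=QmPlaceholderDeploymentHashXXXXXXXXXXXXXXXXXXXX

# Drive indexing decisions for this deployment from threshold rules:
# index it only if at least 5 GRT of stake and some curation signal are present.
graph indexer rules set $DEPLOYMENT decisionBasis rules minStake 5 minSignal 1

# Inspect the rule the agent will act on.
graph indexer rules get $DEPLOYMENT
```

With `decisionBasis` set to `rules`, the agent should only allocate to the deployment while the non-null thresholds on that rule are met, per the behavior described above.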
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 9910a63025469c362a09662fb4d425ef750ab59e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:16 -0500 Subject: [PATCH 0072/1789] New translations overview.mdx (Hindi) --- website/src/pages/hi/indexing/overview.mdx | 98 +++++++++++----------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/hi/indexing/overview.mdx b/website/src/pages/hi/indexing/overview.mdx index f1109f1f70c9..106ac764d2b4 100644 --- a/website/src/pages/hi/indexing/overview.mdx +++ b/website/src/pages/hi/indexing/overview.mdx @@ -7,7 +7,7 @@ Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) i जीआरटी जो प्रोटोकॉल में दांव पर लगा है, विगलन अवधि के अधीन है और यदि अनुक्रमणिका दुर्भावनापूर्ण हैं और अनुप्रयोगों को गलत डेटा प्रदान करते हैं या यदि वे गलत तरीके से अनुक्रमणित करते हैं तो इसे घटाया जा सकता है। इंडेक्सर्स नेटवर्क में योगदान करने के लिए डेलीगेटर्स से प्रत्यायोजित हिस्सेदारी के लिए पुरस्कार भी अर्जित करते हैं। -इंडेक्सर्स सबग्राफ के क्यूरेशन सिग्नल के आधार पर इंडेक्स के लिए सबग्राफ का चयन करते हैं, जहां क्यूरेटर GRT को यह इंगित करने के लिए दांव पर लगाते हैं कि कौन से सबग्राफ उच्च-गुणवत्ता वाले हैं और उन्हें प्राथमिकता दी जानी चाहिए। उपभोक्ता (उदाहरण के लिए अनुप्रयोग) पैरामीटर भी सेट कर सकते हैं जिसके लिए इंडेक्सर्स अपने सबग्राफ के लिए प्रश्नों को प्रोसेस करते हैं और क्वेरी शुल्क मूल्य निर्धारण के लिए वरीयताएँ निर्धारित करते हैं। +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. 
applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. ## FAQ @@ -19,17 +19,17 @@ The minimum stake for an Indexer is currently set to 100K GRT. **Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. ### How are indexing rewards distributed? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. ### What is a proof of indexing (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. ### When are indexing rewards distributed? @@ -41,7 +41,7 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. 
Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: +1. Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -91,31 +91,31 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### How do Indexers know which subgraphs to index? +### How do Indexers know which Subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards. ### What are the hardware requirements? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Small** - Enough to get started indexing several Subgraphs, will likely need to be expanded. 
- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. -- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,17 +125,17 @@ Indexers may differentiate themselves by applying advanced techniques for making ## Infrastructure -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a Subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - The main store for the Graph Node, this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - **Indexer service** - Handles all required external communications with the network. 
Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing Subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,20 +147,20 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### ग्राफ-नोड -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent @@ -295,7 +295,7 @@ Deploy all resources with `kubectl apply -k $dir`. ### ग्राफ-नोड -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use Subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. #### Getting started from source @@ -365,9 +365,9 @@ docker-compose up To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which Subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on Subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. @@ -525,7 +525,7 @@ graph indexer status #### Indexer management using Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. #### Usage @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. - `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
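For a concrete sense of how a global default interacts with a per-deployment override, the hedged sketch below reuses the CLI commands listed above. It assumes the agent accepts `global` as the identifier for the global rule, uses a placeholder deployment hash, and treats the threshold value as illustrative only.

```bash
# Global default: only index deployments that clear the minimum stake threshold.
graph indexer rules set global decisionBasis rules minStake 5

# Force-index one specific deployment regardless of thresholds
# (sets its decisionBasis to `always`, as described above).
graph indexer rules start QmPlaceholderDeploymentHashYYYYYYYYYYYYYYYYYYYY
```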
Data model: @@ -679,7 +679,7 @@ graph indexer actions execute approve Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocate stake to a specific Subgraph deployment - required action params: - deploymentID @@ -694,7 +694,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment - required action params: - allocationID @@ -706,7 +706,7 @@ Note that supported action types for allocation management have different input #### Cost models -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora @@ -782,7 +782,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 6. Call `stake()` to stake GRT in the protocol. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. 8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows them to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From aeffb34f41f7b654ef5b08e68947cb78550c373a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:17 -0500 Subject: [PATCH 0073/1789] New translations overview.mdx (Swahili) --- website/src/pages/sw/indexing/overview.mdx | 817 +++++++++++++++++++++ 1 file changed, 817 insertions(+) create mode 100644 website/src/pages/sw/indexing/overview.mdx diff --git a/website/src/pages/sw/indexing/overview.mdx b/website/src/pages/sw/indexing/overview.mdx new file mode 100644 index 000000000000..8223b3cd348f --- /dev/null +++ b/website/src/pages/sw/indexing/overview.mdx @@ -0,0 +1,817 @@ +--- +title: Indexing Overview +sidebarTitle: Overview +--- + +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. + +GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. + +Indexers select Subgraphs to index based on the Subgraph’s curation signal, where Curators stake GRT in order to indicate which Subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their Subgraphs and set preferences for query fee pricing. + +## FAQ + +### What is the minimum stake required to be an Indexer on the network? 
+ +The minimum stake for an Indexer is currently set to 100K GRT. + +### What are the revenue streams for an Indexer? + +**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. + +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing Subgraph deployments for the network. + +### How are indexing rewards distributed? + +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across Subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that Subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** + +Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. + +### What is a proof of indexing (POI)? + +POIs are used in the network to verify that an Indexer is indexing the Subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific Subgraph deployment up to and including that block. + +### When are indexing rewards distributed? + +Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). + +### Can pending indexing rewards be monitored? + +The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation. + +Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: + +1. Query the [mainnet Subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: + +```graphql +query indexerAllocations { + indexer(id: "") { + allocations { + activeForIndexer { + allocations { + id + } + } + } + } +} +``` + +Use Etherscan to call `getRewards()`: + +- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- To call `getRewards()`: + - Expand the **9. 
getRewards** dropdown. + - Enter the **allocationID** in the input. + - Click the **Query** button. + +### What are disputes and where can I view them? + +Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have 7 epochs dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either of allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fisherman are any network participants that open disputes. + +Disputes have **three** possible outcomes, so does the deposit of the Fishermen. + +- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed. +- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed. +- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT. + +Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. + +### What are query fee rebates and when are they distributed? + +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. + +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. + +### What is query fee cut and indexing reward cut? + +The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters. + +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. + +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. + +### How do Indexers know which Subgraphs to index? 
+
+Indexers may differentiate themselves by applying advanced techniques for making Subgraph indexing decisions, but to give a general idea we'll discuss several key metrics used to evaluate Subgraphs in the network:
+
+- **Curation signal** - The proportion of network curation signal applied to a particular Subgraph is a good indicator of the interest in that Subgraph, especially during the bootstrap phase when query volume is ramping up.
+
+- **Query fees collected** - The historical data for volume of query fees collected for a specific Subgraph is a good indicator of future demand.
+
+- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific Subgraphs can allow an Indexer to monitor the supply side for Subgraph queries, to identify Subgraphs that the network is showing confidence in or Subgraphs that may show a need for more supply.
+
+- **Subgraphs with no indexing rewards** - Some Subgraphs do not generate indexing rewards, mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a Subgraph if it is not generating indexing rewards.
+
+### What are the hardware requirements?
+
+- **Small** - Enough to get started indexing several Subgraphs; will likely need to be expanded.
+- **Standard** - Default setup; this is what is used in the example k8s/terraform deployment manifests.
+- **Medium** - Production Indexer supporting 100 Subgraphs and 200-500 requests per second.
+- **Large** - Prepared to index all currently used Subgraphs and serve requests for the related traffic.
+
+| Setup    | Postgres (CPUs) | Postgres (memory in GBs) | Postgres (disk in TBs) | VMs (CPUs) | VMs (memory in GBs) |
+| -------- | :-------------: | :----------------------: | :--------------------: | :--------: | :-----------------: |
+| Small    |        4        |            8             |           1            |     4      |         16          |
+| Standard |        8        |            30            |           1            |     12     |         48          |
+| Medium   |        16       |            64            |           2            |     32     |         64          |
+| Large    |        72       |           468            |          3.5           |     48     |         184         |
+
+### What are some basic security precautions an Indexer should take?
+
+- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between the keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions.
+
+- **Firewall** - Only the Indexer service needs to be exposed publicly, and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8020), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed.
+
+## Infrastructure
+
+At the center of an Indexer's infrastructure is the Graph Node, which monitors the indexed networks, extracts and loads data per a Subgraph definition, and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network.
+
+- **PostgreSQL database** - The main store for the Graph Node; this is where Subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions.
+
+- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client, or it could be a more complex setup that load balances across multiple clients. It's important to be aware that certain Subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API.
+
+- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node; an IPFS node for the network is hosted at https://ipfs.network.thegraph.com.
+
+- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway.
+
+- **Indexer agent** - Facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations.
+
+- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server.
+
+Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes.
+
+### Ports overview
+
+> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC and the Indexer management endpoints detailed below.
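+
+As a rough illustration of the guidance above, here is a minimal host-firewall sketch. It assumes `ufw` (not part of the Indexer stack; any firewall or cloud security group works) and uses the default port numbers from the tables below, so adjust both to your own setup.
+
+```sh
+# Minimal sketch (assumption: ufw is available). Only the Indexer service port
+# is opened publicly; the admin ports called out above stay unreachable from outside.
+ufw default deny incoming
+ufw allow 7600/tcp   # Indexer service (public queries)
+# Graph Node admin JSON-RPC (8020), indexing status API (8030), Indexer management
+# API (18000) and Postgres (5432) are left closed; reach them over SSH tunnels or a
+# private network instead.
+ufw enable
+```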
+ +#### Graph Node + +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | + +#### Indexer Service + +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid Subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | + +#### Indexer Agent + +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | + +### Setup server infrastructure using Terraform on Google Cloud + +> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. + +#### Install prerequisites + +- Google Cloud SDK +- Kubectl command line tool +- Terraform + +#### Create a Google Cloud Project + +- Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer). + +- Navigate to the `./terraform` directory, this is where all commands should be executed. + +```sh +cd terraform +``` + +- Authenticate with Google Cloud and create a new project. + +```sh +gcloud auth login +project= +gcloud projects create --enable-cloud-apis $project +``` + +- Use the Google Cloud Console's billing page to enable billing for the new project. + +- Create a Google Cloud configuration. + +```sh +proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") +gcloud config configurations create $project +gcloud config set project "$proj_id" +gcloud config set compute/region us-central1 +gcloud config set compute/zone us-central1-a +``` + +- Enable required Google Cloud APIs. + +```sh +gcloud services enable compute.googleapis.com +gcloud services enable container.googleapis.com +gcloud services enable servicenetworking.googleapis.com +gcloud services enable sqladmin.googleapis.com +``` + +- Create a service account. + +```sh +svc_name= +gcloud iam service-accounts create $svc_name \ + --description="Service account for Terraform" \ + --display-name="$svc_name" +gcloud iam service-accounts list +# Get the email of the service account from the list +svc=$(gcloud iam service-accounts list --format='get(email)' +--filter="displayName=$svc_name") +gcloud iam service-accounts keys create .gcloud-credentials.json \ + --iam-account="$svc" +gcloud projects add-iam-policy-binding $proj_id \ + --member serviceAccount:$svc \ + --role roles/editor +``` + +- Enable peering between database and Kubernetes cluster that will be created in the next step. + +```sh +gcloud compute addresses create google-managed-services-default \ + --prefix-length=20 \ + --purpose=VPC_PEERING \ + --network default \ + --global \ + --description 'IP Range for peer networks.' +gcloud services vpc-peerings connect \ + --network=default \ + --ranges=google-managed-services-default +``` + +- Create minimal terraform configuration file (update as needed). + +```sh +indexer= +cat > terraform.tfvars < \ + -f Dockerfile.indexer-service \ + -t indexer-service:latest \ +# Indexer agent +docker build \ + --build-arg NPM_TOKEN= \ + -f Dockerfile.indexer-agent \ + -t indexer-agent:latest \ +``` + +- Run the components + +```sh +docker run -p 7600:7600 -it indexer-service:latest ... +docker run -p 18000:8000 -it indexer-agent:latest ... +``` + +**NOTE**: After starting the containers, the Indexer service should be accessible at [http://localhost:7600](http://localhost:7600) and the Indexer agent should be exposing the Indexer management API at [http://localhost:18000/](http://localhost:18000/). 
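+
+To sanity-check the two containers, one option is to probe those endpoints directly. The routes below come from the port tables earlier in this section; the exact responses will depend on your configuration, so treat this as a quick liveness check rather than a full health check.
+
+```sh
+# Quick checks against the locally mapped ports (adjust if you changed the -p flags above)
+curl -s http://localhost:7600/          # Indexer service root
+curl -s http://localhost:7600/status    # status route exposed by the Indexer service
+curl -s http://localhost:18000/         # Indexer management API served by the agent
+```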
+ +#### Using K8s and Terraform + +See the [Setup Server Infrastructure Using Terraform on Google Cloud](/indexing/overview/#setup-server-infrastructure-using-terraform-on-google-cloud) section + +#### Usage + +> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). + +#### Indexer agent + +```sh +graph-indexer-agent start \ + --ethereum \ + --ethereum-network mainnet \ + --mnemonic \ + --indexer-address \ + --graph-node-query-endpoint http://localhost:8000/ \ + --graph-node-status-endpoint http://localhost:8030/graphql \ + --graph-node-admin-endpoint http://localhost:8020/ \ + --public-indexer-url http://localhost:7600/ \ + --indexer-geo-coordinates \ + --index-node-ids default \ + --indexer-management-port 18000 \ + --metrics-port 7040 \ + --network-subgraph-endpoint http://query-node-0:8000/subgraphs/id/QmUzRg2HHMpbgf6Q4VHKNDbtBEJnyp5JWCh2gUX9AV6jXv \ + --default-allocation-amount 100 \ + --register true \ + --inject-dai true \ + --postgres-host localhost \ + --postgres-port 5432 \ + --postgres-username \ + --postgres-password \ + --postgres-database indexer \ + --allocation-management auto \ + | pino-pretty +``` + +#### Indexer service + +```sh +SERVER_HOST=localhost \ +SERVER_PORT=5432 \ +SERVER_DB_NAME=is_staging \ +SERVER_DB_USER= \ +SERVER_DB_PASSWORD= \ +graph-indexer-service start \ + --ethereum \ + --ethereum-network mainnet \ + --mnemonic \ + --indexer-address \ + --port 7600 \ + --metrics-port 7300 \ + --graph-node-query-endpoint http://localhost:8000/ \ + --graph-node-status-endpoint http://localhost:8030/graphql \ + --postgres-host localhost \ + --postgres-port 5432 \ + --postgres-username \ + --postgres-password \ + --postgres-database is_staging \ + --network-subgraph-endpoint http://query-node-0:8000/subgraphs/id/QmUzRg2HHMpbgf6Q4VHKNDbtBEJnyp5JWCh2gUX9AV6jXv \ + | pino-pretty +``` + +#### Indexer CLI + +The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. + +```sh +graph indexer connect http://localhost:18000 +graph indexer status +``` + +#### Indexer management using Indexer CLI + +The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking Subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. + +#### Usage + +The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. + +- `graph indexer connect ` - Connect to the Indexer management API. 
Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) + +- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. + +- `graph indexer rules set [options] ...` - Set one or more indexing rules. + +- `graph indexer rules start [options] ` - Start indexing a Subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available Subgraphs on the network will be indexed. + +- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. + +- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. + +- `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additional argument `--status` can be used to print out all actions of a certain status. + +- `graph indexer action queue allocate ` - Queue allocation action + +- `graph indexer action queue reallocate ` - Queue reallocate action + +- `graph indexer action queue unallocate ` - Queue unallocate action + +- `graph indexer actions cancel [ ...]` - Cancel all action in the queue if id is unspecified, otherwise cancel array of id with space as separator + +- `graph indexer actions approve [ ...]` - Approve multiple actions for execution + +- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately + +All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument. + +#### Indexing rules + +Indexing rules can either be applied as global defaults or for specific Subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the Subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. + +For example, if the global rule has a `minStake` of **5** (GRT), any Subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. + +Data model: + +```graphql +type IndexingRule { + identifier: string + identifierType: IdentifierType + decisionBasis: IndexingDecisionBasis! 
+ allocationAmount: number | null + allocationLifetime: number | null + autoRenewal: boolean + parallelAllocations: number | null + maxAllocationPercentage: number | null + minSignal: string | null + maxSignal: string | null + minStake: string | null + minAverageQueryFees: string | null + custom: string | null + requireSupported: boolean | null + } + +IdentifierType { + deployment + subgraph + group +} + +IndexingDecisionBasis { + rules + never + always + offchain +} +``` + +Example usage of indexing rule: + +``` +graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK + +graph indexer rules set QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK decisionBasis always allocationAmount 123321 allocationLifetime 14 autoRenewal false requireSupported false + +graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK + +graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK +``` + +#### Actions queue CLI + +The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. + +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: + +- Action added to the queue by the 3rd party optimizer tool or indexer-cli user +- Indexer can use the `indexer-cli` to view all queued actions +- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. +- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. +- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. +- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. 
+ +Data model: + +```graphql +Type ActionInput { + status: ActionStatus + type: ActionType + deploymentID: string | null + allocationID: string | null + amount: string | null + poi: string | null + force: boolean | null + source: string + reason: string | null + priority: number | null +} + +ActionStatus { + queued + approved + pending + success + failed + canceled +} + +ActionType { + allocate + unallocate + reallocate + collect +} +``` + +Example usage from source: + +```bash +graph indexer actions get all + +graph indexer actions get --status queued + +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 + +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 + +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae + +graph indexer actions cancel + +graph indexer actions approve 1 3 5 + +graph indexer actions execute approve +``` + +Note that supported action types for allocation management have different input requirements: + +- `Allocate` - allocate stake to a specific Subgraph deployment + + - required action params: + - deploymentID + - amount + +- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere + + - required action params: + - allocationID + - deploymentID + - optional action params: + - poi + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) + +- `Reallocate` - atomically close allocation and open a fresh allocation for the same Subgraph deployment + + - required action params: + - allocationID + - deploymentID + - amount + - optional action params: + - poi + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) + +#### Cost models + +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each Subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. + +#### Agora + +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. + +A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. + +Example cost model: + +``` +# This statement captures the skip value, +# uses a boolean expression in the predicate to match specific queries that use `skip` +# and a cost expression to calculate the cost based on the `skip` value and the SYSTEM_LOAD global +query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTEM_LOAD; + +# This default will match any GraphQL expression. 
+# It uses a Global substituted into the expression to calculate cost +default => 0.1 * $SYSTEM_LOAD; +``` + +Example query costing using the above model: + +| Query | Price | +| ---------------------------------------------------------------------------- | ------- | +| { pairs(skip: 5000) { id } } | 0.5 GRT | +| { tokens { symbol } } | 0.1 GRT | +| { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | + +#### Applying the cost model + +Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. + +```sh +indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' +indexer cost set model my_model.agora +``` + +## Interacting with the network + +### Stake in the protocol + +The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. + +> Note: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools). + +Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network. + +#### Approve tokens + +1. Open the [Remix app](https://remix.ethereum.org/) in a browser + +2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). + +3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. + +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. + +5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. + +6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). + +#### Stake tokens + +1. Open the [Remix app](https://remix.ethereum.org/) in a browser + +2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI. + +3. With `Staking.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. + +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. + +5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply. + +6. Call `stake()` to stake GRT in the protocol. + +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on Subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. + +8. 
(Optional) In order to control the distribution of rewards and strategically attract Delegators, Indexers can update their delegation parameters by updating their `indexingRewardCut` (parts per million), `queryFeeCut` (parts per million), and `cooldownBlocks` (number of blocks). To do so, call `setDelegationParameters()`. The following example sets the `queryFeeCut` to distribute 95% of query rebates to the Indexer and 5% to Delegators, sets the `indexingRewardCut` to distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and sets the `cooldownBlocks` period to 500 blocks.
+
+```
+setDelegationParameters(950000, 600000, 500)
+```
+
+### Setting delegation parameters
+
+The `setDelegationParameters()` function in the [staking contract](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol) is essential for Indexers, allowing them to set parameters that define their interactions with Delegators, influencing their reward sharing and delegation capacity.
+
+### How to set delegation parameters
+
+To set the delegation parameters using the Graph Explorer interface, follow these steps:
+
+1. Navigate to [Graph Explorer](https://thegraph.com/explorer/).
+2. Connect your wallet. Choose multisig (such as Gnosis Safe) and then select mainnet. Note: You will need to repeat this process for Arbitrum One.
+3. Connect the wallet you have as a signer.
+4. Navigate to the 'Settings' section and select 'Delegation Parameters'. These parameters should be configured to achieve an effective cut within the desired range. Upon entering values in the provided input fields, the interface will automatically calculate the effective cut. Adjust these values as necessary to attain the desired effective cut percentage.
+5. Submit the transaction to the network.
+
+> Note: This transaction will need to be confirmed by the multisig wallet signers.
+
+### The life of an allocation
+
+After being created by an Indexer, a healthy allocation goes through two states.
+
+- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a Subgraph deployment, which allows the Indexer to claim indexing rewards and serve queries for that Subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.
+
+- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)), or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI), the indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)).
+
+Indexers are recommended to utilize offchain syncing functionality to sync Subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for Subgraphs that may take longer than 28 epochs to sync or that have some chance of failing non-deterministically.
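+
+As a sketch of that flow using the `rules` commands documented above (reusing the example deployment ID from earlier; substitute your own), a deployment can be synced offchain first and only switched to allocating once it is healthy:
+
+```sh
+# Sync the deployment offchain first (no onchain allocation is created yet)
+graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK
+
+# Once it has caught up to chainhead, set it to always so the agent opens an allocation
+graph indexer rules start QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK
+```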
From a75dc5bd98dd7d7c1027deced09359f33aa83a67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:18 -0500 Subject: [PATCH 0074/1789] New translations tap.mdx (Romanian) --- website/src/pages/ro/indexing/tap.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ro/indexing/tap.mdx b/website/src/pages/ro/indexing/tap.mdx index 3bab672ab211..e81b7af5421c 100644 --- a/website/src/pages/ro/indexing/tap.mdx +++ b/website/src/pages/ro/indexing/tap.mdx @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Migration Guide @@ -128,18 +128,18 @@ query_url = "" status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# Query URL for the Graph Network Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# Query URL for the Escrow Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. 
# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" From 0b4ebb184e4eb90b1144ef935a69a0acdaabefac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:20 -0500 Subject: [PATCH 0075/1789] New translations tap.mdx (French) --- website/src/pages/fr/indexing/tap.mdx | 96 +++++++++++++-------------- 1 file changed, 48 insertions(+), 48 deletions(-) diff --git a/website/src/pages/fr/indexing/tap.mdx b/website/src/pages/fr/indexing/tap.mdx index b378f70212be..acb00afa34ef 100644 --- a/website/src/pages/fr/indexing/tap.mdx +++ b/website/src/pages/fr/indexing/tap.mdx @@ -45,28 +45,28 @@ Tant que vous exécutez `tap-agent` et `indexer-agent`, tout sera exécuté auto ### Contrats -| Contrat | Mainnet Arbitrum (42161) | Arbitrum Sepolia (421614) | -| --- | --- | --- | -| TAP Verifier | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` | -| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | -| Tiers de confiance (Escrow) | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | +| Contrat | Mainnet Arbitrum (42161) | Arbitrum Sepolia (421614) | +| ---------------------------------------- | -------------------------------------------- | -------------------------------------------- | +| TAP Verifier | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` | +| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | +| Tiers de confiance (Escrow) | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | ### Passerelle (Gateway) -| Composant | Mainnet Node et Edge (Arbitrum Mainnet) | Testnet Node et Edge (Arbitrum Mainnet) | -| ----------- | --------------------------------------------- | --------------------------------------------- | -| Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | -| Signataires | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | -| Aggregateur | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | +| Composant | Mainnet Node et Edge (Arbitrum Mainnet) | Testnet Node et Edge (Arbitrum Mainnet) | +| -------------- | --------------------------------------------- | --------------------------------------------- | +| Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | +| Signataires | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | +| Aggregateur | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | ### Exigences En plus des conditions typiques pour faire fonctionner un Indexeur, vous aurez besoin d'un Endpoint `tap-escrow-subgraph` pour interroger les mises à jour de TAP. Vous pouvez utiliser The Graph Network pour interroger ou vous héberger vous-même sur votre `graph-node`. 
-- [Subgraph Graph TAP Arbitrum Sepolia (pour le testnet The Graph )](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Subgraph Graph TAP Arbitrum One (Pour le mainnet The Graph )](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note : `indexer-agent` ne gère pas actuellement l'indexation de ce subgraph comme il le fait pour le déploiement du subgraph réseau. Par conséquent, vous devez l'indexer manuellement. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Guide De Migration @@ -99,72 +99,72 @@ La version requise du logiciel peut être trouvée [ici](https://github.com/grap Pour une configuration minimale, utilisez le modèle suivant : ```bash -# Vous devrez modifier *toutes* les valeurs ci-dessous pour qu'elles correspondent à votre configuration. +# You will have to change *all* the values below to match your setup. # -# Certaines des configurations ci-dessous sont des valeurs globales de graph network, que vous pouvez trouver ici : +# Some of the config below are global graph network values, which you can find here: # # -# Astuce de pro : si vous devez charger certaines valeurs de l'environnement dans cette configuration, vous -# pouvez les écraser avec des variables d'environnement. Par exemple, ce qui suit peut être remplacé -# par [PREFIX]_DATABASE_POSTGRESURL, où PREFIX peut être `INDEXER_SERVICE` ou `TAP_AGENT` : +# Pro tip: if you need to load some values from the environment into this config, you +# can overwrite with environment variables. For example, the following can be replaced +# by [PREFIX]_DATABASE_POSTGRESURL, where PREFIX can be `INDEXER_SERVICE` or `TAP_AGENT`: # # [database] # postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0" [indexer] -indexer_address = "0x111111111111111111111111111111111111111111" +indexer_address = "0x1111111111111111111111111111111111111111" operator_mnemonic = "celery smart tip orange scare van steel radio dragon joy alarm crane" [database] -# L'URL de la base de données Postgres utilisée pour les composants de l'Indexeur. La même base de données -# qui est utilisée par `indexer-agent`. Il est prévu que `indexer-agent` crée -# les tables nécessaires. +# The URL of the Postgres database used for the indexer components. The same database +# that is used by the `indexer-agent`. It is expected that `indexer-agent` will create +# the necessary tables. postgres_url = "postgres://postgres@postgres:5432/postgres" [graph_node] -# URL vers l'endpoint de requête de votre graph-node +# URL to your graph-node's query endpoint query_url = "" -# URL vers l'endpoint d'état de votre graph-node +# URL to your graph-node's status endpoint status_url = "" [subgraphs.network] -# URL de requête pour le subgraph Graph Network. +# Query URL for the Graph Network Subgraph. query_url = "" -# Facultatif, déploiement à rechercher dans le `graph-node` local, s'il est indexé localement. -# L'indexation locale du subgraph est recommandée. 
-# REMARQUE : utilisez uniquement `query_url` ou `deployment_id` -deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +# Optional, deployment to look for in the local `graph-node`, if locally indexed. +# Locally indexing the Subgraph is recommended. +# NOTE: Use `query_url` or `deployment_id` only +deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# URL de requête pour le subgraph Escrow. +# Query URL for the Escrow Subgraph. query_url = "" -# Facultatif, déploiement à rechercher dans le `graph-node` local, s'il est indexé localement. -# Il est recommandé d'indexer localement le subgraph. -# REMARQUE : utilisez uniquement `query_url` ou `deployment_id` -deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +# Optional, deployment to look for in the local `graph-node`, if locally indexed. +# Locally indexing the Subgraph is recommended. +# NOTE: Use `query_url` or `deployment_id` only +deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [blockchain] -# Le chain ID du réseau sur lequel The Graph Network s'exécute +# The chain ID of the network that the graph network is running on chain_id = 1337 -# Adresse du contrat du vérificateur de bon de réception agrégé (RAV) de TAP. -receives_verifier_address = "0x222222222222222222222222222222222222222222222" +# Contract address of TAP's receipt aggregate voucher (RAV) verifier. +receipts_verifier_address = "0x2222222222222222222222222222222222222222" -############################################ -# Configurations spécifiques à tap-agent # -########################################## +######################################## +# Specific configurations to tap-agent # +######################################## [tap] -# Il s'agit du montant des frais que vous êtes prêt à risquer à un moment donné. Par exemple, -# si l'expéditeur cesse de fournir des RAV pendant suffisamment longtemps et que les frais dépassent ce -# montant, le service d'indexation cessera d'accepter les requêtes de l'expéditeur -# jusqu'à ce que les frais soient agrégés. -# REMARQUE : utilisez des chaînes de caractère pour les valeurs décimales afin d'éviter les erreurs d'arrondi -# p. ex. : +# This is the amount of fees you are willing to risk at any given time. For ex. +# if the sender stops supplying RAVs for long enough and the fees exceed this +# amount, the indexer-service will stop accepting queries from the sender +# until the fees are aggregated. +# NOTE: Use strings for decimal values to prevent rounding errors +# e.g: # max_amount_willing_to_lose_grt = "0.1" max_amount_willing_to_lose_grt = 20 [tap.sender_aggregator_endpoints] -# Clé-valeur de tous les expéditeurs et de leurs endpoint d'agrégation -# Celle-ci ci-dessous concerne par exemple la passerelle de testnet E&N. +# Key-Value of all senders and their aggregator endpoints +# This one below is for the E&N testnet gateway for example. 
0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467 = "https://tap-aggregator.network.thegraph.com" ``` From aa2d235afe8f485df4b2e375052fc4eed682078a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:21 -0500 Subject: [PATCH 0076/1789] New translations tap.mdx (Spanish) --- website/src/pages/es/indexing/tap.mdx | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/website/src/pages/es/indexing/tap.mdx b/website/src/pages/es/indexing/tap.mdx index 36fb0939af81..b185605bcc5a 100644 --- a/website/src/pages/es/indexing/tap.mdx +++ b/website/src/pages/es/indexing/tap.mdx @@ -118,7 +118,6 @@ title: |+ Launchpad Actualmente, hay una versión en desarrollo de indexer-rs y tap-agent, que puedes encontrar aquí. - --- Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, TAP**. This system provides fast, efficient microtransactions with minimized trust. @@ -182,10 +181,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Migration Guide @@ -247,18 +246,18 @@ query_url = "" status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# Query URL for the Graph Network Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# Query URL for the Escrow Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. 
# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" From d22d0054aa18612c3a3b6d3fc39fba521dbcb946 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:22 -0500 Subject: [PATCH 0077/1789] New translations tap.mdx (Arabic) --- website/src/pages/ar/indexing/tap.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ar/indexing/tap.mdx b/website/src/pages/ar/indexing/tap.mdx index ee96a02cd5b8..786f8c8a9cfd 100644 --- a/website/src/pages/ar/indexing/tap.mdx +++ b/website/src/pages/ar/indexing/tap.mdx @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Migration Guide @@ -128,18 +128,18 @@ query_url = "" status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# Query URL for the Graph Network Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# Query URL for the Escrow Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. 
# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" From b95d8ce5d1b47940321310db03e5c1e0fd8dfe06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:23 -0500 Subject: [PATCH 0078/1789] New translations tap.mdx (Czech) --- website/src/pages/cs/indexing/tap.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/cs/indexing/tap.mdx b/website/src/pages/cs/indexing/tap.mdx index f8d028634016..1efe337aab2d 100644 --- a/website/src/pages/cs/indexing/tap.mdx +++ b/website/src/pages/cs/indexing/tap.mdx @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Migration Guide @@ -128,18 +128,18 @@ query_url = "" status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# Query URL for the Graph Network Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# Query URL for the Escrow Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. 
# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" From 19e7d832ecfe2b21e693248b459599bfaa2958da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:24 -0500 Subject: [PATCH 0079/1789] New translations tap.mdx (German) --- website/src/pages/de/indexing/tap.mdx | 114 +++++++++++++------------- 1 file changed, 57 insertions(+), 57 deletions(-) diff --git a/website/src/pages/de/indexing/tap.mdx b/website/src/pages/de/indexing/tap.mdx index 13fa3c754e0d..a78df1208080 100644 --- a/website/src/pages/de/indexing/tap.mdx +++ b/website/src/pages/de/indexing/tap.mdx @@ -45,28 +45,28 @@ Solange Sie `tap-agent` und `indexer-agent` ausführen, wird alles automatisch a ### Verträge -| Vertrag | Arbitrum Mainnet (42161) | Arbitrum Sepolia (421614) | -| ------------------- | -------------------------------------------- | -------------------------------------------- | -| TAP-Prüfer | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` | -| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | -| Treuhandkonto | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | +| Vertrag | Arbitrum Mainnet (42161) | Arbitrum Sepolia (421614) | +| -------------------------- | -------------------------------------------- | -------------------------------------------- | +| TAP-Prüfer | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` | +| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | +| Treuhandkonto | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | ### Gateway -| Komponente | Edge- und Node-Mainnet (Arbitrum-Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | -| ------------- | --------------------------------------------- | --------------------------------------------- | -| Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | -| Unterzeichner | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | -| Aggregator | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | +| Komponente | Edge- und Node-Mainnet (Arbitrum-Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| ---------------- | ---------------------------------------------- | --------------------------------------------- | +| Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | +| Unterzeichner | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | +| Aggregator | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | ### Anforderungen Zusätzlich zu den typischen Anforderungen für den Betrieb eines Indexers benötigen Sie einen `tap-escrow-subgraph`-Endpunkt, um TAP-Aktualisierungen abzufragen. Sie können The Graph Network zur Abfrage verwenden oder sich selbst auf Ihrem `graph-node` hosten. 
-- [Graph TAP Arbitrum Sepolia subgraph (für The Graph Testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (für The Graph Mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Hinweis: `indexer-agent` übernimmt derzeit nicht die Indizierung dieses Subgraphen, wie es bei der Bereitstellung von Netzwerk-Subgraphen der Fall ist. Daher müssen Sie ihn manuell indizieren. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Migrationsleitfaden @@ -99,73 +99,73 @@ Die erforderliche Softwareversion finden Sie [hier](https://github.com/graphprot Für eine minimale Konfiguration verwenden Sie die folgende Vorlage: ```bash -# Sie müssen *alle* nachstehenden Werte ändern, um sie an Ihre Einrichtung anzupassen. +# You will have to change *all* the values below to match your setup. # -# Einige der nachstehenden Konfigurationswerte sind globale Graphnetzwerkwerte, die Sie hier finden können: +# Some of the config below are global graph network values, which you can find here: # # -# Pro-Tipp: Wenn Sie einige Werte aus der Umgebung in diese Konfiguration laden müssen, -# können Sie sie mit Umgebungsvariablen überschreiben. Als Datenbeispiel kann folgendes ersetzt werden -# durch [PREFIX]_DATABASE_POSTGRESURL, wobei PREFIX `INDEXER_SERVICE` oder `TAP_AGENT` sein kann: +# Pro tip: if you need to load some values from the environment into this config, you +# can overwrite with environment variables. For example, the following can be replaced +# by [PREFIX]_DATABASE_POSTGRESURL, where PREFIX can be `INDEXER_SERVICE` or `TAP_AGENT`: # -# [Datenbank] -# postgres_url = „postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0“ +# [database] +# postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0" [indexer] -indexer_address = „0x1111111111111111111111111111111111111111“ -operator_mnemonic = „celery smart tip orange scare van steel radio dragon joy alarm crane“ +indexer_address = "0x1111111111111111111111111111111111111111" +operator_mnemonic = "celery smart tip orange scare van steel radio dragon joy alarm crane" [database] -# Die URL der Postgres-Datenbank, die für die Indexer-Komponenten verwendet wird. Die gleiche Datenbank, -# die auch vom `indexer-agent` verwendet wird. Es wird erwartet, dass `indexer-agent` -# die notwendigen Tabellen erstellt. -postgres_url = „postgres://postgres@postgres:5432/postgres“ +# The URL of the Postgres database used for the indexer components. The same database +# that is used by the `indexer-agent`. It is expected that `indexer-agent` will create +# the necessary tables. +postgres_url = "postgres://postgres@postgres:5432/postgres" [graph_node] -# URL zum Abfrageendpunkt Ihres Graph-Knotens -query_url = „“ -# URL zum Status-Endpunkt Ihres Graph-Knotens -status_url = „“ +# URL to your graph-node's query endpoint +query_url = "" +# URL to your graph-node's status endpoint +status_url = "" [subgraphs.network] -# Abfrage-URL für den Graph Network Subgraph. 
-query_url = „“ -# Optional, Einsatz, nach dem im lokalen `graph-node` gesucht wird, falls er lokal indiziert ist. -# Es wird empfohlen, den Subgraphen lokal zu indizieren. -# HINWEIS: Verwenden Sie nur `query_url` oder `deployment_id`. -deployment_id = „Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa“ +# Query URL for the Graph Network Subgraph. +query_url = "" +# Optional, deployment to look for in the local `graph-node`, if locally indexed. +# Locally indexing the Subgraph is recommended. +# NOTE: Use `query_url` or `deployment_id` only +deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Abfrage-URL für den Subgraphen „Escrow“. -query_url = „“ -# Optional, Einsatz, nach dem im lokalen `graph-node` gesucht wird, falls er lokal indiziert ist. -# Es wird empfohlen, den Subgraphen lokal zu indizieren. -# HINWEIS: Verwenden Sie nur `query_url` oder `deployment_id`. -deployment_id = „Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa“ +# Query URL for the Escrow Subgraph. +query_url = "" +# Optional, deployment to look for in the local `graph-node`, if locally indexed. +# Locally indexing the Subgraph is recommended. +# NOTE: Use `query_url` or `deployment_id` only +deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [blockchain] -# Die Ketten-ID des Netzwerks, auf dem das Graph-Netzwerk läuft +# The chain ID of the network that the graph network is running on chain_id = 1337 -# Vertragsadresse des RAV-Prüfers (receipt aggregate voucher) von TAP. -receipts_verifier_address = „0x2222222222222222222222222222222222222222“ +# Contract address of TAP's receipt aggregate voucher (RAV) verifier. +receipts_verifier_address = "0x2222222222222222222222222222222222222222" ######################################## -# Spezifische Konfigurationen für tap-agent # +# Specific configurations to tap-agent # ######################################## [tap] -# Dies ist die Höhe der Gebühren, die Sie bereit sind, zu einem bestimmten Zeitpunkt zu riskieren. Zum Beispiel, -# wenn der Sender lange genug keine RAVs mehr liefert und die Gebühren diesen Betrag -# übersteigt, wird der Indexer-Service keine Anfragen mehr vom Absender annehmen -# bis die Gebühren aggregiert sind. -# HINWEIS: Verwenden Sie Strings für dezimale Werte, um Rundungsfehler zu vermeiden. -# z.B.: -# max_amount_willing_to_lose_grt = „0.1“ -max_Betrag_willig_zu_verlieren_grt = 20 +# This is the amount of fees you are willing to risk at any given time. For ex. +# if the sender stops supplying RAVs for long enough and the fees exceed this +# amount, the indexer-service will stop accepting queries from the sender +# until the fees are aggregated. +# NOTE: Use strings for decimal values to prevent rounding errors +# e.g: +# max_amount_willing_to_lose_grt = "0.1" +max_amount_willing_to_lose_grt = 20 [tap.sender_aggregator_endpoints] -# Key-Value aller Absender und ihrer Aggregator-Endpunkte -# Das folgende Datenbeispiel gilt für das E&N Testnet-Gateway. -0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467 = „https://tap-aggregator.network.thegraph.com“ +# Key-Value of all senders and their aggregator endpoints +# This one below is for the E&N testnet gateway for example. 
+0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467 = "https://tap-aggregator.network.thegraph.com" ``` Anmerkungen: From 95758857bd23728beb770ee7eb2dd049c4af1246 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:25 -0500 Subject: [PATCH 0080/1789] New translations tap.mdx (Italian) --- website/src/pages/it/indexing/tap.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/it/indexing/tap.mdx b/website/src/pages/it/indexing/tap.mdx index 8604a92b41e7..e4fa03cf4e88 100644 --- a/website/src/pages/it/indexing/tap.mdx +++ b/website/src/pages/it/indexing/tap.mdx @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Migration Guide @@ -128,18 +128,18 @@ query_url = "" status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# Query URL for the Graph Network Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# Query URL for the Escrow Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. 
# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" From 33e81f7d732db3b58cb309ada7d3b9061fbc229b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:26 -0500 Subject: [PATCH 0081/1789] New translations tap.mdx (Japanese) --- website/src/pages/ja/indexing/tap.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ja/indexing/tap.mdx b/website/src/pages/ja/indexing/tap.mdx index b1d43a4e628c..d18f7dacdc89 100644 --- a/website/src/pages/ja/indexing/tap.mdx +++ b/website/src/pages/ja/indexing/tap.mdx @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Migration Guide @@ -128,18 +128,18 @@ query_url = "" status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# Query URL for the Graph Network Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# Query URL for the Escrow Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. 
# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" From 923ab4eb23839f3b13789dc8ab26471d69b7aa8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:27 -0500 Subject: [PATCH 0082/1789] New translations tap.mdx (Korean) --- website/src/pages/ko/indexing/tap.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ko/indexing/tap.mdx b/website/src/pages/ko/indexing/tap.mdx index 3bab672ab211..e81b7af5421c 100644 --- a/website/src/pages/ko/indexing/tap.mdx +++ b/website/src/pages/ko/indexing/tap.mdx @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Migration Guide @@ -128,18 +128,18 @@ query_url = "" status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# Query URL for the Graph Network Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# Query URL for the Escrow Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. 
# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" From 0116edc5bfcbcac1145c585f176d73f6ffc18bb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:28 -0500 Subject: [PATCH 0083/1789] New translations tap.mdx (Dutch) --- website/src/pages/nl/indexing/tap.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/nl/indexing/tap.mdx b/website/src/pages/nl/indexing/tap.mdx index 3bab672ab211..e81b7af5421c 100644 --- a/website/src/pages/nl/indexing/tap.mdx +++ b/website/src/pages/nl/indexing/tap.mdx @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Migration Guide @@ -128,18 +128,18 @@ query_url = "" status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# Query URL for the Graph Network Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# Query URL for the Escrow Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. 
# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" From 81f9829f307a82020ce0c3776a8ab2233f7a702b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:29 -0500 Subject: [PATCH 0084/1789] New translations tap.mdx (Polish) --- website/src/pages/pl/indexing/tap.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/pl/indexing/tap.mdx b/website/src/pages/pl/indexing/tap.mdx index 3bab672ab211..e81b7af5421c 100644 --- a/website/src/pages/pl/indexing/tap.mdx +++ b/website/src/pages/pl/indexing/tap.mdx @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Migration Guide @@ -128,18 +128,18 @@ query_url = "" status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# Query URL for the Graph Network Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# Query URL for the Escrow Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. 
# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" From 855f813463fa8ab34387b1ac4dcb3a4fd86ffb02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:30 -0500 Subject: [PATCH 0085/1789] New translations tap.mdx (Portuguese) --- website/src/pages/pt/indexing/tap.mdx | 80 +++++++++++++-------------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/website/src/pages/pt/indexing/tap.mdx b/website/src/pages/pt/indexing/tap.mdx index 33f6583ea3c6..3e4d0b4de4bf 100644 --- a/website/src/pages/pt/indexing/tap.mdx +++ b/website/src/pages/pt/indexing/tap.mdx @@ -45,28 +45,28 @@ Tudo será executado automaticamente enquanto `tap-agent` e `indexer-agent` fore ### Contratos -| Contrato | Mainnet Arbitrum (42161) | Arbitrum Sepolia (421614) | -| ------------------- | -------------------------------------------- | -------------------------------------------- | -| Verificador do TAP | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` | -| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | -| Escrow | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | +| Contrato | Mainnet Arbitrum (42161) | Arbitrum Sepolia (421614) | +| ------------------------- | -------------------------------------------- | -------------------------------------------- | +| Verificador do TAP | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` | +| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | +| Escrow | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | ### Porta de Ligação -| Componente | Mainnet Edge and Note (Mainnet Arbitrum) | Testnet do Edge and Node (Arbitrum Sepolia) | -| ----------- | --------------------------------------------- | --------------------------------------------- | -| Remetente | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | -| Signatários | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | -| Agregador | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | +| Componente | Mainnet Edge and Note (Mainnet Arbitrum) | Testnet do Edge and Node (Arbitrum Sepolia) | +| -------------- | --------------------------------------------- | ------------------------------------------------ | +| Remetente | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | +| Signatários | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | +| Agregador | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | ### Requisitos Além dos requisitos típicos para executar um indexador, é necessário um endpoint `tap-escrow-subgraph` para fazer queries de atualizações do TAP. É possível usar o The Graph Network para fazer queries ou se hospedar no seu `graph-node`. 
-- [Subgraph do TAP do The Graph — Arbitrum Sepolia (para a testnet do The Graph)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Subgraph do TAP do The Graph (para a mainnet do The Graph)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Nota: o `indexer-agent` atualmente não executa o indexamento deste subgraph como faz com o lançamento de subgraphs da rede. Portanto, ele deve ser anexado manualmente. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Guia de migração @@ -99,14 +99,14 @@ O software necessário está [aqui](https://github.com/graphprotocol/indexer/blo Para o mínimo de configuração, veja o exemplo abaixo: ```bash -# Você deve mudar *todos* os valores abaixo para mudar sua configuração. +# You will have to change *all* the values below to match your setup. # -# O abaixo inclui valores globais da Graph Network, como visto aqui: +# Some of the config below are global graph network values, which you can find here: # # -# Fica a dica: se precisar carregar alguns variáveis do ambiente nesta configuração, você -# pode substituí-los com variáveis do ambiente. Por exemplo: pode-se substituir -# o abaixo por [PREFIX]_DATABASE_POSTGRESURL, onde PREFIX pode ser `INDEXER_SERVICE` ou `TAP_AGENT`: +# Pro tip: if you need to load some values from the environment into this config, you +# can overwrite with environment variables. For example, the following can be replaced +# by [PREFIX]_DATABASE_POSTGRESURL, where PREFIX can be `INDEXER_SERVICE` or `TAP_AGENT`: # # [database] # postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0" @@ -116,9 +116,9 @@ indexer_address = "0x1111111111111111111111111111111111111111" operator_mnemonic = "celery smart tip orange scare van steel radio dragon joy alarm crane" [database] -# A URL da base de dados Postgres usada para os componentes do indexador. -# A mesma base de dados usada pelo `indexer-agent`. Espera-se que o `indexer-agent` -# criará as tabelas necessárias. +# The URL of the Postgres database used for the indexer components. The same database +# that is used by the `indexer-agent`. It is expected that `indexer-agent` will create +# the necessary tables. postgres_url = "postgres://postgres@postgres:5432/postgres" [graph_node] @@ -128,44 +128,44 @@ query_url = "" status_url = "" [subgraphs.network] -# URL de query pro subgraph do Graph Network. +# Query URL for the Graph Network Subgraph. query_url = "" -# Opcional, procure o lançamento no `graph-node` local, se localmente indexado. -# Vale a pena indexar o subgraph localmente. -# NOTA: Usar apenas `query_url` ou `deployment_id` +# Optional, deployment to look for in the local `graph-node`, if locally indexed. +# Locally indexing the Subgraph is recommended. +# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# Query URL for the Escrow Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. 
-# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [blockchain] -# ID de chain da rede que está a executar o Graph Network +# The chain ID of the network that the graph network is running on chain_id = 1337 -# Endereço de contrato do verificador de prova de agregação de recibos do TAP. +# Contract address of TAP's receipt aggregate voucher (RAV) verifier. receipts_verifier_address = "0x2222222222222222222222222222222222222222" ######################################## -# Configurações específicas para o tap-agent # +# Specific configurations to tap-agent # ######################################## [tap] -# Esta é a quantia de taxas que você está disposto a arriscar. Por exemplo: -# se o remetente parar de enviar RAVs por tempo suficiente e as taxas passarem -# desta quantia, o indexer-service não aceitará mais queries deste remetente -# até que as taxas sejam agregadas. -# NOTA: Use strings para valores decimais, para evitar erros de arredondamento -# Por exemplo: -# max_amount_willing_to_lose_grt = "0,1" +# This is the amount of fees you are willing to risk at any given time. For ex. +# if the sender stops supplying RAVs for long enough and the fees exceed this +# amount, the indexer-service will stop accepting queries from the sender +# until the fees are aggregated. +# NOTE: Use strings for decimal values to prevent rounding errors +# e.g: +# max_amount_willing_to_lose_grt = "0.1" max_amount_willing_to_lose_grt = 20 [tap.sender_aggregator_endpoints] -# Valor-Chave de todos os remetentes e seus endpoints agregadores -# Por exemplo, o abaixo é para a ponte de ligação do testnet Edge & Node. -0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467 = "https://t +# Key-Value of all senders and their aggregator endpoints +# This one below is for the E&N testnet gateway for example. 
+0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467 = "https://tap-aggregator.network.thegraph.com" ``` Notas: From cddeedb5bd539ecd91b3acf2de387cde0edfb471 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:31 -0500 Subject: [PATCH 0086/1789] New translations tap.mdx (Russian) --- website/src/pages/ru/indexing/tap.mdx | 84 +++++++++++++-------------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/website/src/pages/ru/indexing/tap.mdx b/website/src/pages/ru/indexing/tap.mdx index fe3b7d982be4..b8a9dda1b681 100644 --- a/website/src/pages/ru/indexing/tap.mdx +++ b/website/src/pages/ru/indexing/tap.mdx @@ -45,28 +45,28 @@ TAP позволяет отправителю совершать несколь ### Контракты -| Контракт | Arbitrum Mainnet (42161) | Arbitrum Sepolia (421614) | -| ------------------- | -------------------------------------------- | -------------------------------------------- | -| TAP-верификатор | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` | -| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | -| Escrow | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | +| Контракт | Arbitrum Mainnet (42161) | Arbitrum Sepolia (421614) | +| ---------------------- | -------------------------------------------- | -------------------------------------------- | +| TAP-верификатор | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` | +| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | +| Escrow | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | ### Шлюз -| Компонент | Edge и Node Mainnet (Arbitrum Mainnet) | Edge и Node Testnet (Arbitrum Sepolia) | -| ----------- | --------------------------------------------- | --------------------------------------------- | -| Отправитель | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | -| Подписанты | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | -| Агрегатор | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | +| Компонент | Edge и Node Mainnet (Arbitrum Mainnet) | Edge и Node Testnet (Arbitrum Sepolia) | +| --------------- | --------------------------------------------- | --------------------------------------------- | +| Отправитель | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | +| Подписанты | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | +| Агрегатор | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | ### Требования Помимо типичных требований для запуска индексатора Вам понадобится конечная точка `tap-escrow-subgraph` для запроса обновлений TAP. Вы можете использовать The Graph Network для запроса или размещения себя на своей `graph-node`. 
-- [Субграф Graph TAP Arbitrum Sepolia (для тестовой сети The Graph)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Субграф Graph TAP Arbitrum One (для основной сети The Graph)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Примечание: `indexer-agent` в настоящее время не обрабатывает индексирование этого субграфа, как это происходит при развертывании сетевого субграфа. В итоге Вам придется индексировать его вручную. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Руководство по миграции @@ -99,14 +99,14 @@ TAP позволяет отправителю совершать несколь Для минимальной конфигурации используйте следующий шаблон: ```bash -# Вам придется изменить *все* приведенные ниже значения, чтобы они соответствовали вашим настройкам. +# You will have to change *all* the values below to match your setup. # -# Некоторые из приведенных ниже конфигураций представляют собой глобальные значения graph network, которые Вы можете найти здесь: +# Some of the config below are global graph network values, which you can find here: # # -# Совет профессионала: если Вам нужно загрузить некоторые значения из среды в эту конфигурацию, Вы -# можете перезаписать их переменными среды. Например, следующее можно заменить -# на [PREFIX]_DATABASE_POSTGRESURL, where PREFIX can be `INDEXER_SERVICE` or `TAP_AGENT`: +# Pro tip: if you need to load some values from the environment into this config, you +# can overwrite with environment variables. For example, the following can be replaced +# by [PREFIX]_DATABASE_POSTGRESURL, where PREFIX can be `INDEXER_SERVICE` or `TAP_AGENT`: # # [database] # postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0" @@ -116,55 +116,55 @@ indexer_address = "0x1111111111111111111111111111111111111111" operator_mnemonic = "celery smart tip orange scare van steel radio dragon joy alarm crane" [database] -# URL-адрес базы данных Postgres, используемой для компонентов индексатора. Та же база данных, -# которая используется `indexer-agent`. Ожидается, что `indexer-agent` создаст -# необходимые таблицы. +# The URL of the Postgres database used for the indexer components. The same database +# that is used by the `indexer-agent`. It is expected that `indexer-agent` will create +# the necessary tables. postgres_url = "postgres://postgres@postgres:5432/postgres" [graph_node] -# URL-адрес конечной точки запроса Вашей graph-node +# URL to your graph-node's query endpoint query_url = "" -# URL-адрес конечной точки статуса Вашей graph-node +# URL to your graph-node's status endpoint status_url = "" [subgraphs.network] -# URL-адрес запроса для субграфа Graph Network. +# Query URL for the Graph Network Subgraph. query_url = "" -# Необязательно, развертывание нужно искать в локальной `graph-node`, если оно локально проиндексировано. -# Рекомендуется индексировать субграф локально. -# ПРИМЕЧАНИЕ: используйте только `query_url` или `deployment_id` +# Optional, deployment to look for in the local `graph-node`, if locally indexed. +# Locally indexing the Subgraph is recommended. 
+# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# URL-адрес запроса для субграфа Escrow. +# Query URL for the Escrow Subgraph. query_url = "" -# Необязательно, развертывание нужно искать в локальной `graph-node`, если оно локально проиндексировано. -# Рекомендуется индексировать субграф локально. -# ПРИМЕЧАНИЕ: используйте только `query_url` или `deployment_id` +# Optional, deployment to look for in the local `graph-node`, if locally indexed. +# Locally indexing the Subgraph is recommended. +# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [blockchain] -# Идентификатор чейна сети, в которой работает the graph network работает на +# The chain ID of the network that the graph network is running on chain_id = 1337 -# Контрактный адрес верификатора receipt aggregate voucher (RAV) TAP +# Contract address of TAP's receipt aggregate voucher (RAV) verifier. receipts_verifier_address = "0x2222222222222222222222222222222222222222" ######################################## -# Специальные настройки для tap-agent # +# Specific configurations to tap-agent # ######################################## [tap] -# Это сумма комиссий, которой вы готовы рискнуть в любой момент времени. Например, -# если отправитель не совершает поставку RAV достаточно длительное время, и комиссии превышают это значение -# суммарно, служба-индексатор перестанет принимать запросы от отправителя -# до тех пор, пока комиссии не будут суммированы. -# ПРИМЕЧАНИЕ: Используйте строки для десятичных значений, чтобы избежать ошибок округления -# например: +# This is the amount of fees you are willing to risk at any given time. For ex. +# if the sender stops supplying RAVs for long enough and the fees exceed this +# amount, the indexer-service will stop accepting queries from the sender +# until the fees are aggregated. +# NOTE: Use strings for decimal values to prevent rounding errors +# e.g: # max_amount_willing_to_lose_grt = "0.1" max_amount_willing_to_lose_grt = 20 [tap.sender_aggregator_endpoints] -# Ключ-значение всех отправителей и их конечных точек агрегатора -# Ниже приведен пример шлюза тестовой сети E&N. +# Key-Value of all senders and their aggregator endpoints +# This one below is for the E&N testnet gateway for example. 0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467 = "https://tap-aggregator.network.thegraph.com" ``` From b53b285be04e8e768048adb92b65827ff076af49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:32 -0500 Subject: [PATCH 0087/1789] New translations tap.mdx (Swedish) --- website/src/pages/sv/indexing/tap.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/sv/indexing/tap.mdx b/website/src/pages/sv/indexing/tap.mdx index d69cb7b5bc91..52e643fc8dfc 100644 --- a/website/src/pages/sv/indexing/tap.mdx +++ b/website/src/pages/sv/indexing/tap.mdx @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. 
-- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Migration Guide @@ -128,18 +128,18 @@ query_url = "" status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# Query URL for the Graph Network Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# Query URL for the Escrow Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" From b3de0cadd7a27434f756536e431c423ee9387f76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:33 -0500 Subject: [PATCH 0088/1789] New translations tap.mdx (Turkish) --- website/src/pages/tr/indexing/tap.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/tr/indexing/tap.mdx b/website/src/pages/tr/indexing/tap.mdx index 5ad4f2dc020e..1fe17b13c2fa 100644 --- a/website/src/pages/tr/indexing/tap.mdx +++ b/website/src/pages/tr/indexing/tap.mdx @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. 
+> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Migration Guide @@ -128,18 +128,18 @@ query_url = "" status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# Query URL for the Graph Network Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# Query URL for the Escrow Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" From 08375c4aad1e0ac955f72ca24074af06e51d73d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:34 -0500 Subject: [PATCH 0089/1789] New translations tap.mdx (Ukrainian) --- website/src/pages/uk/indexing/tap.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/uk/indexing/tap.mdx b/website/src/pages/uk/indexing/tap.mdx index 3bab672ab211..e81b7af5421c 100644 --- a/website/src/pages/uk/indexing/tap.mdx +++ b/website/src/pages/uk/indexing/tap.mdx @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Migration Guide @@ -128,18 +128,18 @@ query_url = "" status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# Query URL for the Graph Network Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# Query URL for the Escrow Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. 
-# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" From 20022a204d7a2b66b0ddbd053063f1485c906826 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:35 -0500 Subject: [PATCH 0090/1789] New translations tap.mdx (Chinese Simplified) --- website/src/pages/zh/indexing/tap.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/zh/indexing/tap.mdx b/website/src/pages/zh/indexing/tap.mdx index de09d72fa74a..ed5221bd8710 100644 --- a/website/src/pages/zh/indexing/tap.mdx +++ b/website/src/pages/zh/indexing/tap.mdx @@ -4,7 +4,7 @@ title: TAP Migration Guide Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, TAP**. This system provides fast, efficient microtransactions with minimized trust. -## 概述 +## Overview [TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Migration Guide @@ -128,18 +128,18 @@ query_url = "" status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# Query URL for the Graph Network Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# Query URL for the Escrow Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. 
# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" From ae39894825902df80438d7d89f24c264031ec433 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:36 -0500 Subject: [PATCH 0091/1789] New translations tap.mdx (Urdu (Pakistan)) --- website/src/pages/ur/indexing/tap.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ur/indexing/tap.mdx b/website/src/pages/ur/indexing/tap.mdx index 227fbfc0593f..a7f1e235a676 100644 --- a/website/src/pages/ur/indexing/tap.mdx +++ b/website/src/pages/ur/indexing/tap.mdx @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Migration Guide @@ -128,18 +128,18 @@ query_url = "" status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# Query URL for the Graph Network Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# Query URL for the Escrow Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. 
# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" From 6c1400a4ed04ac8ec776cfd1e2b5f4e34f294664 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:37 -0500 Subject: [PATCH 0092/1789] New translations tap.mdx (Vietnamese) --- website/src/pages/vi/indexing/tap.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/vi/indexing/tap.mdx b/website/src/pages/vi/indexing/tap.mdx index eccf6efc1d41..b0fbc030dae0 100644 --- a/website/src/pages/vi/indexing/tap.mdx +++ b/website/src/pages/vi/indexing/tap.mdx @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Migration Guide @@ -128,18 +128,18 @@ query_url = "" status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# Query URL for the Graph Network Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# Query URL for the Escrow Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. 
# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" From b2be155c40a802585a0ee1425e3001675c15178e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:38 -0500 Subject: [PATCH 0093/1789] New translations tap.mdx (Marathi) --- website/src/pages/mr/indexing/tap.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/mr/indexing/tap.mdx b/website/src/pages/mr/indexing/tap.mdx index f6248123d886..ae2e05ece447 100644 --- a/website/src/pages/mr/indexing/tap.mdx +++ b/website/src/pages/mr/indexing/tap.mdx @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. ## Migration Guide @@ -128,18 +128,18 @@ query_url = "" status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# Query URL for the Graph Network Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. # NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# Query URL for the Escrow Subgraph. query_url = "" # Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. +# Locally indexing the Subgraph is recommended. 
# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" From 36888955edc18af7435383f9041533f71a294496 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:39 -0500 Subject: [PATCH 0094/1789] New translations tap.mdx (Hindi) --- website/src/pages/hi/indexing/tap.mdx | 91 +++++++++++++-------------- 1 file changed, 45 insertions(+), 46 deletions(-) diff --git a/website/src/pages/hi/indexing/tap.mdx b/website/src/pages/hi/indexing/tap.mdx index d2a42ac00ea5..bba1a5dd0e8f 100644 --- a/website/src/pages/hi/indexing/tap.mdx +++ b/website/src/pages/hi/indexing/tap.mdx @@ -51,22 +51,22 @@ TAP एक प्रेषक को एक प्राप्तकर्ता | AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | | Escrow | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | -### गेटवे +### गेटवे -| घटक | Edge and Node Mainnet (Arbitrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | -| ---------------- | --------------------------------------------- | --------------------------------------------- | -| प्रेषक | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | -| हस्ताक्षरकर्ता | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | -| संकेन्द्रीयकर्ता | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | +| घटक | Edge and Node Mainnet (Arbitrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| ----------------- | --------------------------------------------- | --------------------------------------------- | +| प्रेषक | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | +| हस्ताक्षरकर्ता | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | +| संकेन्द्रीयकर्ता | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | ### Requirements एक Indexer चलाने की सामान्य आवश्यकताओं के अलावा, आपको TAP अपडेट को क्वेरी करने के लिए एक tap-escrow-subgraph एंडपॉइंट की आवश्यकता होगी। आप TAP को क्वेरी करने के लिए The Graph Network का उपयोग कर सकते हैं या अपने graph-node पर स्वयं होस्ट कर सकते हैं। -- [Graph TAP Arbitrum Sepolia subgraph (The Graph टेस्टनेट के लिए)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> नोट: `indexer-agent` वर्तमान में इस subgraph का indexing नेटवर्क subgraph डिप्लॉयमेंट की तरह नहीं करता है। इसके परिणामस्वरूप, आपको इसे मैन्युअल रूप से इंडेक्स करना होगा। +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. 
## माइग्रेशन गाइड @@ -99,73 +99,72 @@ TAP एक प्रेषक को एक प्राप्तकर्ता "कम से कम कॉन्फ़िगरेशन के लिए, निम्नलिखित टेम्पलेट का उपयोग करें:" ```bash -#आपको नीचे दिए गए *सभी* मान अपनी सेटअप के अनुसार बदलने होंगे। -*नीचे दिए गए कुछ कॉन्फ़िग वैल्यू ग्लोबल ग्राफ नेटवर्क वैल्यू हैं, जिन्हें आप यहां पा सकते हैं: +# You will have to change *all* the values below to match your setup. # - +# Some of the config below are global graph network values, which you can find here: +# # -#प्रो टिप: यदि आपको इस कॉन्फ़िग में कुछ मान environment से लोड करने की आवश्यकता है, तो आप environment वेरिएबल्स का उपयोग करके ओवरराइट कर सकते हैं। उदाहरण के लिए, निम्नलिखित को [PREFIX]_DATABASE_POSTGRESURL से बदला जा सकता है, जहां PREFIX `INDEXER_SERVICE` या `TAP_AGENT` हो सकता है: -[database] -#postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0" +# Pro tip: if you need to load some values from the environment into this config, you +# can overwrite with environment variables. For example, the following can be replaced +# by [PREFIX]_DATABASE_POSTGRESURL, where PREFIX can be `INDEXER_SERVICE` or `TAP_AGENT`: +# +# [database] +# postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0" + [indexer] indexer_address = "0x1111111111111111111111111111111111111111" operator_mnemonic = "celery smart tip orange scare van steel radio dragon joy alarm crane" [database] - -Postgres डेटाबेस का URL जो indexer components के लिए उपयोग किया जाता है। वही डेटाबेस -जो indexer-agent द्वारा उपयोग किया जाता है। यह अपेक्षित है कि indexer-agent आवश्यक तालिकाएं बनाएगा। +# The URL of the Postgres database used for the indexer components. The same database +# that is used by the `indexer-agent`. It is expected that `indexer-agent` will create +# the necessary tables. postgres_url = "postgres://postgres@postgres:5432/postgres" [graph_node] -आपके graph-node के क्वेरी एंडपॉइंट का URL +# URL to your graph-node's query endpoint query_url = "" - -आपके graph-node के स्टेटस एंडपॉइंट का URL +# URL to your graph-node's status endpoint status_url = "" [subgraphs.network] -Graph Network subgraph के लिए क्वेरी URL। +# Query URL for the Graph Network Subgraph. query_url = "" - -वैकल्पिक, local graph-node में देखने के लिए deployment, यदि स्थानीय रूप से इंडेक्स किया गया है। -subgraph को स्थानीय रूप से इंडेक्स करना अनुशंसित है। -नोट: केवल query_url या deployment_id का उपयोग करें +# Optional, deployment to look for in the local `graph-node`, if locally indexed. +# Locally indexing the Subgraph is recommended. +# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -Escrow subgraph के लिए क्वेरी URL। +# Query URL for the Escrow Subgraph. query_url = "" - -वैकल्पिक, local graph-node में देखने के लिए deployment, यदि स्थानीय रूप से इंडेक्स किया गया है। -subgraph को स्थानीय रूप से इंडेक्स करना अनुशंसित है। -नोट: केवल query_url या deployment_id का उपयोग करें +# Optional, deployment to look for in the local `graph-node`, if locally indexed. +# Locally indexing the Subgraph is recommended. +# NOTE: Use `query_url` or `deployment_id` only deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [blockchain] - -उस नेटवर्क का chain ID जिस पर graph network चल रहा है +# The chain ID of the network that the graph network is running on chain_id = 1337 - -TAP के receipt aggregate voucher (RAV) verifier का कॉन्ट्रैक्ट एड्रेस। +# Contract address of TAP's receipt aggregate voucher (RAV) verifier. 
receipts_verifier_address = "0x2222222222222222222222222222222222222222" ######################################## -#tap-agent के लिए विशिष्ट कॉन्फ़िगरेशन# +# Specific configurations to tap-agent # ######################################## [tap] -#यह वह फीस की मात्रा है जिसे आप किसी भी समय जोखिम में डालने के लिए तैयार हैं। उदाहरण के लिए, -#यदि sender लंबे समय तक RAVs प्रदान करना बंद कर देता है और फीस इस -#राशि से अधिक हो जाती है, तो indexer-service sender से क्वेरी स्वीकार करना बंद कर देगा -#जब तक कि फीस को समेकित नहीं किया जाता। -#नोट: राउंडिंग त्रुटियों से बचने के लिए दशमलव मानों के लिए strings का उपयोग करें -#जैसे: -#max_amount_willing_to_lose_grt = "0.1" +# This is the amount of fees you are willing to risk at any given time. For ex. +# if the sender stops supplying RAVs for long enough and the fees exceed this +# amount, the indexer-service will stop accepting queries from the sender +# until the fees are aggregated. +# NOTE: Use strings for decimal values to prevent rounding errors +# e.g: +# max_amount_willing_to_lose_grt = "0.1" max_amount_willing_to_lose_grt = 20 [tap.sender_aggregator_endpoints] -सभी senders और उनके aggregator endpoints के key-value -नीचे दिया गया यह उदाहरण E&N टेस्टनेट गेटवे के लिए है। +# Key-Value of all senders and their aggregator endpoints +# This one below is for the E&N testnet gateway for example. 0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467 = "https://tap-aggregator.network.thegraph.com" ``` @@ -187,7 +186,7 @@ max_amount_willing_to_lose_grt = 20 ### Grafana डैशबोर्ड -आप Grafana Dashboard (https://github.com/graphprotocol/indexer-rs/blob/main/docs/dashboard.json) डाउनलोड कर सकते हैं और इम्पोर्ट कर सकते हैं। +आप Grafana Dashboard (https://github.com/graphprotocol/indexer-rs/blob/main/docs/dashboard.json) डाउनलोड कर सकते हैं और इम्पोर्ट कर सकते हैं। ### लॉन्चपैड From 68cd2ce76540066d11924e5680af4f29ee147516 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:40 -0500 Subject: [PATCH 0095/1789] New translations tap.mdx (Swahili) --- website/src/pages/sw/indexing/tap.mdx | 193 ++++++++++++++++++++++++++ 1 file changed, 193 insertions(+) create mode 100644 website/src/pages/sw/indexing/tap.mdx diff --git a/website/src/pages/sw/indexing/tap.mdx b/website/src/pages/sw/indexing/tap.mdx new file mode 100644 index 000000000000..e81b7af5421c --- /dev/null +++ b/website/src/pages/sw/indexing/tap.mdx @@ -0,0 +1,193 @@ +--- +title: TAP Migration Guide +--- + +Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, TAP**. This system provides fast, efficient microtransactions with minimized trust. + +## Overview + +[TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: + +- Efficiently handles micropayments. +- Adds a layer of consolidations to onchain transactions and costs. +- Allows Indexers control of receipts and payments, guaranteeing payment for queries. +- It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. + +## Specifics + +TAP allows a sender to make multiple payments to a receiver, **TAP Receipts**, which aggregates these payments into a single payment, a **Receipt Aggregate Voucher**, also known as a **RAV**. This aggregated payment can then be verified on the blockchain, reducing the number of transactions and simplifying the payment process. 
+ +For each query, the gateway will send you a `signed receipt` that is stored on your database. Then, these queries will be aggregated by a `tap-agent` through a request. Afterwards, you’ll receive a RAV. You can update a RAV by sending it with newer receipts and this will generate a new RAV with an increased value. + +### RAV Details + +- It’s money that is waiting to be sent to the blockchain. + +- It will continue to send requests to aggregate and ensure that the total value of non-aggregated receipts does not exceed the `amount willing to lose`. + +- Each RAV can be redeemed once in the contracts, which is why they are sent after the allocation is closed. + +### Redeeming RAV + +As long as you run `tap-agent` and `indexer-agent`, everything will be executed automatically. The following provides a detailed breakdown of the process: + +1. An Indexer closes allocation. + +2. ` period, tap-agent` takes all pending receipts for that specific allocation and requests an aggregation into a RAV, marking it as `last`. + +3. `indexer-agent` takes all the last RAVS and sends redeem requests to the blockchain, which will update the value of `redeem_at`. + +4. During the `` period, `indexer-agent` monitors if the blockchain has any reorganizations that revert the transaction. + + - If it was reverted, the RAV is resent to the blockchain. If it was not reverted, it gets marked as `final`. + +## Blockchain Addresses + +### Contracts + +| Contract | Arbitrum Mainnet (42161) | Arbitrum Sepolia (421614) | +| ------------------- | -------------------------------------------- | -------------------------------------------- | +| TAP Verifier | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` | +| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | +| Escrow | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | + +### Gateway + +| Component | Edge and Node Mainnet (Arbitrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| ---------- | --------------------------------------------- | --------------------------------------------- | +| Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | +| Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | +| Aggregator | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | + +### Requirements + +In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. + +- [Graph TAP Arbitrum Sepolia Subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One Subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) + +> Note: `indexer-agent` does not currently handle the indexing of this Subgraph like it does for the network Subgraph deployment. As a result, you have to index it manually. + +## Migration Guide + +### Software versions + +The required software version can be found [here](https://github.com/graphprotocol/indexer/blob/main/docs/networks/arbitrum-one.md#latest-releases). + +### Steps + +1. 
**Indexer Agent** + + - Follow the [same process](https://github.com/graphprotocol/indexer/pkgs/container/indexer-agent#graph-protocol-indexer-components). + - Give the new argument `--tap-subgraph-endpoint` to activate the new TAP codepaths and enable redeeming of TAP RAVs. + +2. **Indexer Service** + + - Fully replace your current configuration with the [new Indexer Service rs](https://github.com/graphprotocol/indexer-rs). It's recommend that you use the [container image](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs). + - Like the older version, you can scale Indexer Service horizontally easily. It is still stateless. + +3. **TAP Agent** + + - Run _one_ single instance of [TAP Agent](https://github.com/graphprotocol/indexer-rs) at all times. It's recommend that you use the [container image](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs). + +4. **Configure Indexer Service and TAP Agent** + + Configuration is a TOML file shared between `indexer-service` and `tap-agent`, supplied with the argument `--config /path/to/config.toml`. + + Check out the full [configuration](https://github.com/graphprotocol/indexer-rs/blob/main/config/maximal-config-example.toml) and the [default values](https://github.com/graphprotocol/indexer-rs/blob/main/config/default_values.toml) + +For minimal configuration, use the following template: + +```bash +# You will have to change *all* the values below to match your setup. +# +# Some of the config below are global graph network values, which you can find here: +# +# +# Pro tip: if you need to load some values from the environment into this config, you +# can overwrite with environment variables. For example, the following can be replaced +# by [PREFIX]_DATABASE_POSTGRESURL, where PREFIX can be `INDEXER_SERVICE` or `TAP_AGENT`: +# +# [database] +# postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0" + +[indexer] +indexer_address = "0x1111111111111111111111111111111111111111" +operator_mnemonic = "celery smart tip orange scare van steel radio dragon joy alarm crane" + +[database] +# The URL of the Postgres database used for the indexer components. The same database +# that is used by the `indexer-agent`. It is expected that `indexer-agent` will create +# the necessary tables. +postgres_url = "postgres://postgres@postgres:5432/postgres" + +[graph_node] +# URL to your graph-node's query endpoint +query_url = "" +# URL to your graph-node's status endpoint +status_url = "" + +[subgraphs.network] +# Query URL for the Graph Network Subgraph. +query_url = "" +# Optional, deployment to look for in the local `graph-node`, if locally indexed. +# Locally indexing the Subgraph is recommended. +# NOTE: Use `query_url` or `deployment_id` only +deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + +[subgraphs.escrow] +# Query URL for the Escrow Subgraph. +query_url = "" +# Optional, deployment to look for in the local `graph-node`, if locally indexed. +# Locally indexing the Subgraph is recommended. +# NOTE: Use `query_url` or `deployment_id` only +deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + +[blockchain] +# The chain ID of the network that the graph network is running on +chain_id = 1337 +# Contract address of TAP's receipt aggregate voucher (RAV) verifier. 
+receipts_verifier_address = "0x2222222222222222222222222222222222222222" + +######################################## +# Specific configurations to tap-agent # +######################################## +[tap] +# This is the amount of fees you are willing to risk at any given time. For ex. +# if the sender stops supplying RAVs for long enough and the fees exceed this +# amount, the indexer-service will stop accepting queries from the sender +# until the fees are aggregated. +# NOTE: Use strings for decimal values to prevent rounding errors +# e.g: +# max_amount_willing_to_lose_grt = "0.1" +max_amount_willing_to_lose_grt = 20 + +[tap.sender_aggregator_endpoints] +# Key-Value of all senders and their aggregator endpoints +# This one below is for the E&N testnet gateway for example. +0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467 = "https://tap-aggregator.network.thegraph.com" +``` + +Notes: + +- Values for `tap.sender_aggregator_endpoints` can be found in the [gateway section](/indexing/tap/#gateway). +- Values for `blockchain.receipts_verifier_address` must be used accordingly to the [Blockchain addresses section](/indexing/tap/#contracts) using the appropriate chain id. + +**Log Level** + +- You can set the log level by using the `RUST_LOG` environment variable. +- It’s recommended that you set it to `RUST_LOG=indexer_tap_agent=debug,info`. + +## Monitoring + +### Metrics + +All components expose the port 7300 to be queried by prometheus. + +### Grafana Dashboard + +You can download [Grafana Dashboard](https://github.com/graphprotocol/indexer-rs/blob/main/docs/dashboard.json) and import. + +### Launchpad + +Currently, there is a WIP version of `indexer-rs` and `tap-agent` that can be found [here](https://github.com/graphops/launchpad-charts/tree/main/charts/graph-network-indexer) From a7a586fa69706dea15e5d3000f80eb1ea3025b70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:41 -0500 Subject: [PATCH 0096/1789] New translations graph-node.mdx (Romanian) --- .../pages/ro/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/ro/indexing/tooling/graph-node.mdx b/website/src/pages/ro/indexing/tooling/graph-node.mdx index 0250f14a3d08..f5778789213d 100644 --- a/website/src/pages/ro/indexing/tooling/graph-node.mdx +++ b/website/src/pages/ro/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: Graph Node --- -Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. 
+[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL database -The main store for the Graph Node, this is where subgraph data is stored, as well as metadata about subgraphs, and subgraph-agnostic network data such as the block cache, and eth_call cache. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### Network clients In order to index a network, Graph Node needs access to a network client via an EVM-compatible JSON-RPC API. This RPC may connect to a single client or it could be a more complex setup that load balances across multiple. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS Nodes -Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. 
### Prometheus metrics server @@ -77,19 +77,19 @@ A complete Kubernetes example configuration can be found in the [indexer reposit When it is running Graph Node exposes the following ports: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## Advanced Graph Node configuration -At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the subgraphs to be indexed. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. @@ -114,13 +114,13 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https: #### Multiple Graph Nodes -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > Note that multiple Graph Nodes can all be configured to use the same database, which itself can be horizontally scaled via sharding. #### Deployment rules -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
Example deployment rule configuration: @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -167,11 +167,11 @@ Any node whose --node-id matches the regular expression will be set up to only r For most use cases, a single Postgres database is sufficient to support a graph-node instance. When a graph-node instance outgrows a single Postgres database, it is possible to split the storage of graph-node's data across multiple Postgres databases. All databases together form the store of the graph-node instance. Each individual database is called a shard. -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. Sharding becomes useful when your existing database can't keep up with the load that Graph Node puts on it, and when it's not possible to increase the database size anymore. -> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between subgraphs; in those situations it can help dramatically if the high-volume subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume subgraphs. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. In terms of configuring connections, start with max_connections in postgresql.conf set to 400 (or maybe even 200) and look at the store_connection_wait_time_ms and store_connection_checkout_count Prometheus metrics. Noticeable wait times (anything above 5ms) is an indication that there are too few connections available; high wait times there will also be caused by the database being very busy (like high CPU load). However if the database seems otherwise stable, high wait times indicate a need to increase the number of connections. In the configuration, how many connections each graph-node instance can use is an upper limit, and Graph Node will not keep connections open if it doesn't need them. 
@@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### Supporting multiple networks -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Multiple networks - Multiple providers per network (this can allow splitting of load across providers, and can also allow for configuration of full nodes as well as archive nodes, with Graph Node preferring cheaper providers if a given workload allows). @@ -225,11 +225,11 @@ Users who are operating a scaled indexing setup with advanced configuration may ### Managing Graph Node -Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing subgraphs. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### Logging -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). @@ -247,11 +247,11 @@ The graphman command is included in the official containers, and you can docker Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` -### Working with subgraphs +### Working with Subgraphs #### Indexing status API -Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different subgraphs, checking proofs of indexing, inspecting subgraph features and more. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). @@ -263,7 +263,7 @@ There are three separate parts of the indexing process: - Processing events in order with the appropriate handlers (this can involve calling the chain for state, and fetching data from the store) - Writing the resulting data to the store -These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where subgraphs are slow to index, the underlying cause will depend on the specific subgraph. +These stages are pipelined (i.e. 
they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. Common causes of indexing slowness: @@ -276,24 +276,24 @@ Common causes of indexing slowness: - The provider itself falling behind the chain head - Slowness in fetching new receipts at the chain head from the provider -Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### Failed subgraphs +#### Failed Subgraphs -During indexing subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Deterministic failures: these are failures which will not be resolved with retries - Non-deterministic failures: these might be down to issues with the provider, or some unexpected Graph Node error. When a non-deterministic failure occurs, Graph Node will retry the failing handlers, backing off over time. -In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Block and call cache -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node caches certain data in the store in order to save refetching from the provider. 
Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. If a block cache inconsistency is suspected, such as a tx receipt missing event: @@ -304,7 +304,7 @@ If a block cache inconsistency is suspected, such as a tx receipt missing event: #### Querying issues and errors -Once a subgraph has been indexed, indexers can expect to serve queries via the subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users. @@ -316,7 +316,7 @@ Graph Node caches GraphQL queries by default, which can significantly reduce dat ##### Analysing queries -Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that subgraph or query. And then of course to resolve it, if possible. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. In other cases, the trigger might be high memory usage on a query node, in which case the challenge is first to identify the query causing the issue. @@ -336,10 +336,10 @@ In general, tables where the number of distinct entities are less than 1% of the Once a table has been determined to be account-like, running `graphman stats account-like .` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### Removing subgraphs +#### Removing Subgraphs > This is new functionality, which will be available in Graph Node 0.29.x -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 9df669507150bf47759fd102563ba9eca11e8ede Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:42 -0500 Subject: [PATCH 0097/1789] New translations graph-node.mdx (French) --- .../pages/fr/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/fr/indexing/tooling/graph-node.mdx b/website/src/pages/fr/indexing/tooling/graph-node.mdx index 6476aad5aa73..362df7bd33cf 100644 --- a/website/src/pages/fr/indexing/tooling/graph-node.mdx +++ b/website/src/pages/fr/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: Nœud de The Graph --- -Graph Node est le composant qui indexe les subgraphs et rend les données résultantes disponibles pour interrogation via une API GraphQL. En tant que tel, il est au cœur de la pile de l’indexeur, et le bon fonctionnement de Graph Node est crucial pour exécuter un indexeur réussi. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. Ceci fournit un aperçu contextuel de Graph Node et de certaines des options les plus avancées disponibles pour les Indexeurs. Une documentation et des instructions détaillées peuvent être trouvées dans le dépôt [Graph Node ](https://github.com/graphprotocol/graph-node). ## Nœud de The Graph -[Graph Node](https://github.com/graphprotocol/graph-node) est l'implémentation de référence pour l'indexation des subgraphs sur The Graph Network, la connexion aux clients de la blockchain, l'indexation des subgraphs et la mise à disposition des données indexées pour les requêtes. 
+[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. Graph Node (et l'ensemble de la pile de l’indexeur) peut être exécuté sur serveur dédié (bare metal) ou dans un environnement cloud. Cette souplesse du composant central d'indexation est essentielle à la solidité du protocole The Graph. De même, Graph Node peut être [compilé à partir du code source](https://github.com/graphprotocol/graph-node), ou les Indexeurs peuvent utiliser l'une des [images Docker fournies](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL database -Le magasin principal du nœud de graph, c'est là que les données des sous-graphes sont stockées, ainsi que les métadonnées sur les subgraphs et les données réseau indépendantes des subgraphs telles que le cache de blocs et le cache eth_call. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### Clients réseau Pour indexer un réseau, Graph Node doit avoir accès à un client réseau via une API JSON-RPC compatible avec EVM. Cette RPC peut se connecter à un seul client ou à une configuration plus complexe qui équilibre la charge entre plusieurs clients. -Alors que certains subgraphs peuvent ne nécessiter qu'un nœud complet, d'autres peuvent avoir des caractéristiques d'indexation qui nécessitent des fonctionnalités RPC supplémentaires. En particulier, les subgraphs qui font des `eth_calls` dans le cadre de l'indexation nécessiteront un noeud d'archive qui supporte [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), et les subgraphs avec des `callHandlers`, ou des `blockHandlers` avec un filtre `call`, nécessitent le support de `trace_filter` ([voir la documentation du module trace ici](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). \*\*Network Firehoses : un Firehose est un service gRPC fournissant un flux de blocs ordonné, mais compatible avec les fork, développé par les principaux développeurs de The Graph pour mieux prendre en charge une indexation performante à l'échelle. Il ne s'agit pas actuellement d'une exigence de l'Indexeur, mais les Indexeurs sont encouragés à se familiariser avec la technologie, en avance sur la prise en charge complète du réseau. Pour en savoir plus sur le Firehose [ici](https://firehose.streamingfast.io/). ### Nœuds IPFS -Les métadonnées de déploiement de subgraphs sont stockées sur le réseau IPFS. The Graph Node accède principalement au noed IPFS pendant le déploiement du subgraph pour récupérer le manifeste du subgraph et tous les fichiers liés. Les indexeurs de réseau n'ont pas besoin d'héberger leur propre noed IPFS. Un noed IPFS pour le réseau est hébergé sur https://ipfs.network.thegraph.com. +Subgraph deployment metadata is stored on the IPFS network. 
The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. ### Serveur de métriques Prometheus @@ -77,19 +77,19 @@ Un exemple complet de configuration Kubernetes se trouve dans le [dépôt d'Inde Lorsqu'il est en cours d'exécution, Graph Node expose les ports suivants : -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important** : Soyez prudent lorsque vous exposez des ports publiquement - les **ports d'administration** doivent être verrouillés. Ceci inclut l'endpoint JSON-RPC de Graph Node. ## Configuration avancée du nœud graph -Dans sa forme la plus simple, Graph Node peut être utilisé avec une seule instance de Graph Node, une seule base de données PostgreSQL, un nœud IPFS et les clients réseau selon les besoins des subgraphs à indexer. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. Cette configuration peut être mise à l'échelle horizontalement, en ajoutant plusieurs Graph Nodes, et plusieurs bases de données pour supporter ces Graph Nodes. Les utilisateurs avancés voudront peut-être profiter de certaines des capacités de mise à l'échelle horizontale de Graph Node, ainsi que de certaines des options de configuration les plus avancées, via le fichier `config.toml` et les variables d'environnement de Graph Node. @@ -114,13 +114,13 @@ La documentation complète de `config.toml` peut être trouvée dans la [documen #### Multiple Graph Nodes -L'indexation Graph Node peut être mise à l'échelle horizontalement, en exécutant plusieurs instances de Graph Node pour répartir l'indexation et l'interrogation sur différents nœuds. Cela peut être fait simplement en exécutant des Graph Nodes configurés avec un `node_id` différent au démarrage (par exemple dans le fichier Docker Compose), qui peut ensuite être utilisé dans le fichier `config.toml` pour spécifier les [nœuds de requête dédiés](#dedicated-query-nodes), les [ingesteurs de blocs](#dedicated-block-ingestion) et en répartissant les subgraphs sur les nœuds avec des [règles de déploiement](#deployment-rules). +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > Notez que plusieurs nœuds de graph peuvent tous être configurés pour utiliser la même base de données, qui elle-même peut être mise à l'échelle horizontalement via le partitionnement. #### Règles de déploiement -Étant donné plusieurs Graph Node, il est nécessaire de gérer le déploiement de nouveaux subgraphs afin que le même subgraph ne soit pas indexé par deux nœuds différents, ce qui entraînerait des collisions. Cela peut être fait en utilisant des règles de déploiement, qui peuvent également spécifier dans quel `shard` les données d'un subgraph doivent être stockées, si le partitionnement de base de données est utilisé. Les règles de déploiement peuvent correspondre au nom du subgraph et au réseau que le déploiement indexe afin de prendre une décision. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. 
This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. Exemple de configuration de règle de déploiement : @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -167,11 +167,11 @@ Tout nœud dont --node-id correspond à l'expression régulière sera configuré Pour la plupart des cas d'utilisation, une seule base de données Postgres suffit pour prendre en charge une instance de nœud graph. Lorsqu'une instance de nœud graph dépasse une seule base de données Postgres, il est possible de diviser le stockage des données de nœud graph sur plusieurs bases de données Postgres. Toutes les bases de données forment ensemble le magasin de l’instance de nœud graph. Chaque base de données individuelle est appelée une partition. -Les fragments peuvent être utilisés pour diviser les déploiements de subgraph sur plusieurs bases de données et peuvent également être utilisés pour faire intervenir des réplicas afin de répartir la charge de requête sur plusieurs bases de données. Cela inclut la configuration du nombre de connexions de base de données disponibles que chaque `graph-node` doit conserver dans son pool de connexions pour chaque base de données, ce qui devient de plus en plus important à mesure que davantage de subgraph sont indexés. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. Le partage devient utile lorsque votre base de données existante ne peut pas suivre la charge que Graph Node lui impose et lorsqu'il n'est plus possible d'augmenter la taille de la base de données. -> Il est généralement préférable de créer une base de données unique aussi grande que possible avant de commencer avec des fragments. Une exception est lorsque le trafic des requêtes est réparti de manière très inégale entre les subgraphs ; dans ces situations, cela peut être considérablement utile si les subgraphs à volume élevé sont conservés dans une partition et tout le reste dans une autre, car cette configuration rend plus probable que les données des subgraphs à volume élevé restent dans le cache interne de la base de données et ne le font pas. sont remplacés par des données qui ne sont pas autant nécessaires à partir de subgraphs à faible volume. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. 
En termes de configuration des connexions, commencez par max_connections dans postgresql.conf défini sur 400 (ou peut-être même 200) et regardez les métriques store_connection_wait_time_ms et store_connection_checkout_count Prometheus. Des temps d'attente notables (tout ce qui dépasse 5 ms) indiquent qu'il y a trop peu de connexions disponibles ; des temps d'attente élevés seront également dus au fait que la base de données est très occupée (comme une charge CPU élevée). Cependant, si la base de données semble par ailleurs stable, des temps d'attente élevés indiquent la nécessité d'augmenter le nombre de connexions. Dans la configuration, le nombre de connexions que chaque instance de nœud graph peut utiliser constitue une limite supérieure, et Graph Node ne maintiendra pas les connexions ouvertes s'il n'en a pas besoin. @@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### Prise en charge de plusieurs réseaux -The Graph Protocol augmente le nombre de réseaux pris en charge pour l'indexation des récompenses, et il existe de nombreux subgraphs indexant des réseaux non pris en charge. Un indexeur peut choisir de les indexer malgré tout. Le fichier `config.toml` permet une configuration riche et flexible : +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Plusieurs réseaux - Plusieurs fournisseurs par réseau (cela peut permettre de répartir la charge entre les fournisseurs, et peut également permettre la configuration de nœuds complets ainsi que de nœuds d'archives, Graph Node préférant les fournisseurs moins chers si une charge de travail donnée le permet). @@ -225,11 +225,11 @@ Les utilisateurs qui utilisent une configuration d'indexation à grande échelle ### Gestion du nœud de graph -Étant donné un nœud de graph en cours d'exécution (ou des nœuds de graph !), le défi consiste alors à gérer les subgraphs déployés sur ces nœuds. Graph Node propose une gamme d'outils pour vous aider à gérer les subgraphs. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### Journal de bord -Les logs de Graph Node peuvent fournir des informations utiles pour le débogage et l'optimisation de Graph Node et de subgraphs spécifiques. Graph Node supporte différents niveaux de logs via la variable d'environnement `GRAPH_LOG`, avec les niveaux suivants : error, warn, info, debug ou trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. De plus, fixer `GRAPH_LOG_QUERY_TIMING` à `gql` fournit plus de détails sur la façon dont les requêtes GraphQL s'exécutent (bien que cela génère un grand volume de logs). @@ -247,11 +247,11 @@ La commande graphman est incluse dans les conteneurs officiels, et vous pouvez d La documentation complète des commandes `graphman` est disponible dans le dépôt Graph Node. 
Voir [/docs/graphman.md](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) dans le dépôt Graph Node `/docs` -### Travailler avec des subgraphs +### Working with Subgraphs #### API d'état d'indexation -Disponible sur le port 8030/graphql par défaut, l'API d'état d'indexation expose une gamme de méthodes pour vérifier l'état d'indexation de différents subgraphs, vérifier les preuves d'indexation, inspecter les fonctionnalités des subgraphs et bien plus encore. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. Le schéma complet est disponible [ici](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). @@ -263,7 +263,7 @@ Le processus d'indexation comporte trois parties distinctes : - Traiter les événements dans l'ordre avec les gestionnaires appropriés (cela peut impliquer d'appeler la chaîne pour connaître l'état et de récupérer les données du magasin) - Écriture des données résultantes dans le magasin -Ces étapes sont pipeline (c’est-à-dire qu’elles peuvent être exécutées en parallèle), mais elles dépendent les unes des autres. Lorsque les subgraphs sont lents à indexer, la cause sous-jacente dépendra du subgraph spécifique. +These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. Causes courantes de lenteur d’indexation : @@ -276,24 +276,24 @@ Causes courantes de lenteur d’indexation : - Le prestataire lui-même prend du retard sur la tête de la chaîne - Lenteur dans la récupération des nouvelles recettes en tête de chaîne auprès du prestataire -Les métriques d’indexation de subgraphs peuvent aider à diagnostiquer la cause première de la lenteur de l’indexation. Dans certains cas, le problème réside dans le subgraph lui-même, mais dans d'autres, des fournisseurs de réseau améliorés, une réduction des conflits de base de données et d'autres améliorations de configuration peuvent améliorer considérablement les performances d'indexation. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### Subgraphs ayant échoué +#### Failed Subgraphs -Lors de l'indexation, les subgraphs peuvent échouer s'ils rencontrent des données inattendues, si certains composants ne fonctionnent pas comme prévu ou s'il y a un bogue dans les gestionnaires d'événements ou la configuration. Il existe deux types généraux de pannes : +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Échecs déterministes : ce sont des échecs qui ne seront pas résolus par de nouvelles tentatives - Échecs non déterministes : ils peuvent être dus à des problèmes avec le fournisseur ou à une erreur inattendue de Graph Node. Lorsqu'un échec non déterministe se produit, Graph Node réessaiera les gestionnaires défaillants, en reculant au fil du temps. 
-Dans certains cas, un échec peut être résolu par l'indexeur (par exemple, si l'erreur est due au fait de ne pas disposer du bon type de fournisseur, l'ajout du fournisseur requis permettra de poursuivre l'indexation). Cependant, dans d'autres cas, une modification du code du subgraph est requise. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Les défaillances déterministes sont considérés comme "final" (définitifs), avec une preuve d'indexation générée pour le bloc défaillant, alors que les défaillances non déterministes ne le sont pas, car le subgraph pourait "se rétablir " et poursuivre l'indexation. Dans certains cas, l'étiquette non déterministe est incorrecte et le subgraph ne surmontera jamais l'erreur ; de tels défaillances doivent être signalés en tant que problèmes sur le dépôt de Graph Node. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Bloquer et appeler le cache -Graph Node met en cache certaines données dans le store afin d'éviter de les récupérer auprès du fournisseur. Les blocs sont mis en cache, ainsi que les résultats des `eth_calls` (ces derniers étant mis en cache à partir d'un bloc spécifique). Cette mise en cache peut augmenter considérablement la vitesse d'indexation lors de la « resynchronisation » d'un subgraph légèrement modifié. +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -Cependant, dans certains cas, si un nœud Ethereum a fourni des données incorrectes pendant une certaine période, cela peut se retrouver dans le cache, conduisant à des données incorrectes ou à des subgraphs défaillants. Dans ce cas, les Indexeurs peuvent utiliser `graphman` pour effacer le cache empoisonné, puis rembobiner les subgraph affectés, ce qui permettra de récupérer des données fraîches auprès du fournisseur (que l'on espère sain). +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. Si une incohérence du cache de blocs est suspectée, telle qu'un événement de réception de transmission manquant : @@ -304,7 +304,7 @@ Si une incohérence du cache de blocs est suspectée, telle qu'un événement de #### Interroger les problèmes et les erreurs -Une fois qu'un subgraph a été indexé, les indexeurs peuvent s'attendre à traiter les requêtes via le point de terminaison de requête dédié du subgraph. 
Si l'indexeur espère traiter un volume de requêtes important, un nœud de requête dédié est recommandé, et en cas de volumes de requêtes très élevés, les indexeurs peuvent souhaiter configurer des fragments de réplique afin que les requêtes n'aient pas d'impact sur le processus d'indexation. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. Cependant, même avec un nœud de requête et des répliques dédiés, certaines requêtes peuvent prendre beaucoup de temps à exécuter et, dans certains cas, augmenter l'utilisation de la mémoire et avoir un impact négatif sur le temps de requête des autres utilisateurs. @@ -316,7 +316,7 @@ Graph Node met en cache les requêtes GraphQL par défaut, ce qui peut réduire ##### Analyser les requêtes -Les requêtes problématiques apparaissent le plus souvent de deux manières. Dans certains cas, les utilisateurs eux-mêmes signalent qu'une requête donnée est lente. Dans ce cas, le défi consiste à diagnostiquer la raison de la lenteur, qu'il s'agisse d'un problème général ou spécifique à ce subgraph ou à cette requête. Et puis bien sûr de le résoudre, si possible. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. Dans d'autres cas, le déclencheur peut être une utilisation élevée de la mémoire sur un nœud de requête, auquel cas le défi consiste d'abord à identifier la requête à l'origine du problème. @@ -336,10 +336,10 @@ En général, les tables où le nombre d'entités distinctes est inférieur à 1 Une fois qu'une table a été déterminée comme étant de type compte, l'exécution de `graphman stats account-like .
<table>` activera l'optimisation de type compte pour les requêtes sur cette table. L'optimisation peut être désactivée à nouveau avec `graphman stats account-like --clear <sgdNNN>.<table>
` Il faut compter jusqu'à 5 minutes pour que les noeuds de requêtes remarquent que l'optimisation a été activée ou désactivée. Après avoir activé l'optimisation, il est nécessaire de vérifier que le changement ne ralentit pas les requêtes pour cette table. Si vous avez configuré Grafana pour surveiller Postgres, les requêtes lentes apparaîtront dans `pg_stat_activity` en grand nombre, prenant plusieurs secondes. Dans ce cas, l'optimisation doit être désactivée à nouveau. -Pour les subgraphs de type Uniswap, les tables `pair` et `token` sont les meilleurs candidats pour cette optimisation, et peuvent avoir un effet considérable sur la charge de la base de données. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### Supprimer des subgraphs +#### Removing Subgraphs > Il s'agit d'une nouvelle fonctionnalité qui sera disponible dans Graph Node 0.29.x -A un moment donné, un Indexeur peut vouloir supprimer un subgraph donné. Cela peut être facilement fait via `graphman drop`, qui supprime un déploiement et toutes ses données indexées. Le déploiement peut être spécifié soit comme un nom de subgraph, soit comme un hash IPFS `Qm..`, ou alors comme le namespace `sgdNN` de la base de données . Une documentation plus détaillée est disponible [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 4eeb711c8e692580c8ef4d9cd14b30d8d643ccf6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:43 -0500 Subject: [PATCH 0098/1789] New translations graph-node.mdx (Spanish) --- .../pages/es/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/es/indexing/tooling/graph-node.mdx b/website/src/pages/es/indexing/tooling/graph-node.mdx index 7fadb2a27660..c2522201c5f5 100644 --- a/website/src/pages/es/indexing/tooling/graph-node.mdx +++ b/website/src/pages/es/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: Graph Node --- -Graph Node es el componente que indexa los subgrafos, y hace que los datos resultantes estén disponibles para su consulta a través de una API GraphQL. Como tal, es fundamental para el stack del Indexador, y el correcto funcionamiento de Graph Node es crucial para ejecutar un Indexador con éxito. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. 
+[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### Base de datos PostgreSQL -El almacén principal para Graph Node, aquí es donde se almacenan los datos de los subgrafos, así como los metadatos de los subgrafos, y los datos de una red subgrafo-agnóstica como el caché de bloques, y el caché eth_call. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### Clientes de red Para indexar una red, Graph Node necesita acceso a un cliente de red a través de una API JSON-RPC compatible con EVM. Esta RPC puede conectarse a un solo cliente o puede ser una configuración más compleja que equilibre la carga entre varios clientes. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### Nodos IPFS -Los metadatos de deploy del subgrafo se almacenan en la red IPFS. El Graph Node accede principalmente al nodo IPFS durante el deploy del subgrafo para obtener el manifiesto del subgrafo y todos los archivos vinculados. Los Indexadores de red no necesitan alojar su propio nodo IPFS. En https://ipfs.network.thegraph.com se aloja un nodo IPFS para la red. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. 
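The provider requirements above (archive access with EIP-1898 support, and `trace_filter` for call handlers) can be sanity-checked by hand. This is only a sketch: the RPC URL is a placeholder, the `eth_call` payload is deliberately trivial, and error messages vary by client.

```bash
RPC_URL=https://archive-node.example   # placeholder

# EIP-1898: eth_call accepting an object block parameter at a historical block.
curl -s "$RPC_URL" -H 'Content-Type: application/json' -d '{
  "jsonrpc": "2.0", "id": 1, "method": "eth_call",
  "params": [
    { "to": "0x0000000000000000000000000000000000000000", "data": "0x" },
    { "blockNumber": "0x1" }
  ]
}'

# trace_filter: required for callHandlers and call-filtered blockHandlers.
curl -s "$RPC_URL" -H 'Content-Type: application/json' -d '{
  "jsonrpc": "2.0", "id": 2, "method": "trace_filter",
  "params": [ { "fromBlock": "0x1", "toBlock": "0x1" } ]
}'
```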
### Servidor de métricas Prometheus @@ -77,19 +77,19 @@ A complete Kubernetes example configuration can be found in the [indexer reposit Cuando está funcionando, Graph Node muestra los siguientes puertos: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## Configuración avanzada de Graph Node -En su forma más simple, Graph Node puede funcionar con una única instancia de Graph Node, una única base de datos PostgreSQL, un nodo IPFS y los clientes de red que requieran los subgrafos a indexar. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. @@ -114,13 +114,13 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https: #### Graph Nodes múltiples -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > Ten en cuenta que varios Graph Nodes pueden configurarse para utilizar la misma base de datos, que a su vez puede escalarse horizontalmente mediante sharding. #### Reglas de deploy -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
Ejemplo de configuración de reglas de deploy: @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -167,11 +167,11 @@ Cualquier nodo cuyo --node-id coincida con la expresión regular se configurará Para la mayoría de los casos de uso, una única base de datos Postgres es suficiente para soportar una instancia de graph-node. Cuando una instancia de graph-node supera una única base de datos Postgres, es posible dividir el almacenamiento de los datos de graph-node en varias bases de datos Postgres. Todas las bases de datos juntas forman el almacén de la instancia graph-node. Cada base de datos individual se denomina shard. -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. El Sharding resulta útil cuando la base de datos existente no puede soportar la carga que le impone Graph Node y cuando ya no es posible aumentar el tamaño de la base de datos. -> En general, es mejor hacer una única base de datos lo más grande posible, antes de empezar con los shards. Una excepción es cuando el tráfico de consultas se divide de forma muy desigual entre los subgrafos; en esas situaciones puede ayudar dramáticamente si los subgrafos de alto volumen se mantienen en un shard y todo lo demás en otro, porque esa configuración hace que sea más probable que los datos de los subgrafos de alto volumen permanezcan en la caché interna de la base de datos y no sean reemplazados por datos que no se necesitan tanto de los subgrafos de bajo volumen. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. En términos de configuración de las conexiones, comienza con max_connections en postgresql.conf establecido en 400 (o tal vez incluso 200) y mira las métricas de Prometheus store_connection_wait_time_ms y store_connection_checkout_count. Tiempos de espera notables (cualquier cosa por encima de 5ms) es una indicación de que hay muy pocas conexiones disponibles; altos tiempos de espera allí también serán causados por la base de datos que está muy ocupada (como alta carga de CPU). Sin embargo, si la base de datos parece estable, los tiempos de espera elevados indican la necesidad de aumentar el número de conexiones. 
En la configuración, el número de conexiones que puede utilizar cada instancia de Graph Node es un límite superior, y Graph Node no mantendrá conexiones abiertas si no las necesita. @@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### Soporte de múltiples redes -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Redes múltiples - Múltiples proveedores por red (esto puede permitir dividir la carga entre los proveedores, y también puede permitir la configuración de nodos completos, así como nodos de archivo, con Graph Node prefiriendo proveedores más baratos si una carga de trabajo dada lo permite). @@ -225,11 +225,11 @@ Los usuarios que están operando una configuración de indexación escalada con ### Operar Graph Node -Dado un Graph Node en funcionamiento (¡o Graph Nodes!), el reto consiste en gestionar los subgrafos deployados en esos nodos. Graph Node ofrece una serie de herramientas para ayudar a gestionar los subgrafos. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### Logging -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). @@ -247,11 +247,11 @@ The graphman command is included in the official containers, and you can docker Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` -### Trabajar con subgrafos +### Working with Subgraphs #### API de estado de indexación -Disponible por defecto en el puerto 8030/graphql, la API de estado de indexación expone una serie de métodos para comprobar el estado de indexación de diferentes subgrafos, comprobar pruebas de indexación, inspeccionar características de subgrafos y mucho más. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). 
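To make the `graphman` workflow above concrete, here is a minimal sketch of running it inside the official container. The container name and config path are assumptions for a typical Docker setup; `info` and `drop` are subcommands documented in the graphman.md linked in the text, and `drop` is destructive, so the hash below is only a placeholder.

```bash
# Inspect a deployment by Subgraph name or IPFS hash
# (container name and config path are assumptions; adjust to your setup)
docker exec -it graph-node \
  graphman --config /etc/graph-node/config.toml info my-org/my-subgraph

# Destructive: delete a deployment and all of its indexed data
docker exec -it graph-node \
  graphman --config /etc/graph-node/config.toml drop QmPlaceholderDeploymentHash
```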
@@ -263,7 +263,7 @@ El proceso de indexación consta de tres partes diferenciadas: - Procesar los eventos en orden con los handlers apropiados (esto puede implicar llamar a la cadena para obtener el estado y obtener datos del store) - Escribir los datos resultantes en el store -"Estas etapas están en serie (es decir, se pueden ejecutar en paralelo), pero dependen una de la otra. Cuando los subgrafos son lentos en indexarse, la causa subyacente dependerá del subgrafo específico. +These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. Causas habituales de la lentitud de indexación: @@ -276,24 +276,24 @@ Causas habituales de la lentitud de indexación: - El proveedor en sí mismo se está quedando rezagado con respecto a la cabeza de la cadena - Lentitud en la obtención de nuevos recibos en la cabeza de la cadena desde el proveedor -Las métricas de indexación de subgrafos pueden ayudar a diagnosticar la causa raíz de la lentitud de la indexación. En algunos casos, el problema reside en el propio subgrafo, pero en otros, la mejora de los proveedores de red, la reducción de la contención de la base de datos y otras mejoras de configuración pueden mejorar notablemente el rendimiento de la indexación. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### Subgrafos fallidos +#### Failed Subgraphs -Durante la indexación, los subgrafos pueden fallar si encuentran datos inesperados, si algún componente no funciona como se esperaba o si hay algún error en los event handlers o en la configuración. Hay dos tipos generales de fallo: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Fallos deterministas: son fallos que no se resolverán con reintentos - Fallos no deterministas: pueden deberse a problemas con el proveedor o a algún error inesperado de Graph Node. Cuando se produce un fallo no determinista, Graph Node reintentará los handlers que han fallado, retrocediendo en el tiempo. -En algunos casos, un fallo puede ser resuelto por el Indexador (por ejemplo, si el error es resultado de no tener el tipo correcto de proveedor, añadir el proveedor necesario permitirá continuar con la indexación). Sin embargo, en otros, se requiere un cambio en el código del subgrafo. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. 
+> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Caché de bloques y llamadas -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. Si se sospecha de una inconsistencia en el caché de bloques, como un evento de falta de recepción tx: @@ -304,7 +304,7 @@ Si se sospecha de una inconsistencia en el caché de bloques, como un evento de #### Consulta de problemas y errores -Una vez que un subgrafo ha sido indexado, los Indexadores pueden esperar servir consultas a través del endpoint de consulta dedicado del subgrafo. Si el Indexador espera servir un volumen de consultas significativo, se recomienda un nodo de consulta dedicado, y en caso de volúmenes de consulta muy altos, los Indexadores pueden querer configurar shards de réplica para que las consultas no impacten en el proceso de indexación. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. Sin embargo, incluso con un nodo de consulta dedicado y réplicas, ciertas consultas pueden llevar mucho tiempo para ejecutarse y, en algunos casos, aumentar el uso de memoria y afectar negativamente el tiempo de consulta de otros usuarios. @@ -316,7 +316,7 @@ Graph Node caches GraphQL queries by default, which can significantly reduce dat ##### Análisis de consultas -Las consultas problemáticas suelen surgir de dos maneras. En algunos casos, los propios usuarios informan de que una consulta determinada es lenta. En ese caso, el reto consiste en diagnosticar el motivo de la lentitud, ya sea un problema general o específico de ese subgrafo o consulta. Y, por supuesto, resolverlo, si es posible. +Problematic queries most often surface in one of two ways. 
In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. En otros casos, el desencadenante puede ser un uso elevado de memoria en un nodo de consulta, en cuyo caso el reto consiste primero en identificar la consulta causante del problema. @@ -336,10 +336,10 @@ In general, tables where the number of distinct entities are less than 1% of the Once a table has been determined to be account-like, running `graphman stats account-like .
<table>` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear <sgdNNN>.<table>
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### Eliminar subgrafos +#### Removing Subgraphs > Se trata de una nueva funcionalidad, que estará disponible en Graph Node 0.29.x -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From d71e852b3416875fa253b77a9eec8cfc5b2add7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:45 -0500 Subject: [PATCH 0099/1789] New translations graph-node.mdx (Arabic) --- .../pages/ar/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/ar/indexing/tooling/graph-node.mdx b/website/src/pages/ar/indexing/tooling/graph-node.mdx index 0250f14a3d08..f5778789213d 100644 --- a/website/src/pages/ar/indexing/tooling/graph-node.mdx +++ b/website/src/pages/ar/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: Graph Node --- -Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. 
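Tying together the logging and query-analysis notes in the hunks above: slow queries are easier to investigate with query timing logs enabled. The invocation below is a sketch for a from-source build; the config file, IPFS endpoint and node id are illustrative, and Docker users would set the same environment variables on the container instead.

```bash
# GRAPH_LOG and GRAPH_LOG_QUERY_TIMING are the environment variables named in the docs;
# paths and the node id are illustrative only.
export GRAPH_LOG=info
export GRAPH_LOG_QUERY_TIMING=gql

cargo run -p graph-node --release -- \
  --config config.toml \
  --ipfs https://ipfs.network.thegraph.com \
  --node-id query_node_0
```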
Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL database -The main store for the Graph Node, this is where subgraph data is stored, as well as metadata about subgraphs, and subgraph-agnostic network data such as the block cache, and eth_call cache. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### Network clients In order to index a network, Graph Node needs access to a network client via an EVM-compatible JSON-RPC API. This RPC may connect to a single client or it could be a more complex setup that load balances across multiple. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS Nodes -Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. ### Prometheus metrics server @@ -77,19 +77,19 @@ A complete Kubernetes example configuration can be found in the [indexer reposit When it is running Graph Node exposes the following ports: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## Advanced Graph Node configuration -At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the subgraphs to be indexed. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. @@ -114,13 +114,13 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https: #### Multiple Graph Nodes -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > Note that multiple Graph Nodes can all be configured to use the same database, which itself can be horizontally scaled via sharding. #### Deployment rules -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
Example deployment rule configuration: @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -167,11 +167,11 @@ Any node whose --node-id matches the regular expression will be set up to only r For most use cases, a single Postgres database is sufficient to support a graph-node instance. When a graph-node instance outgrows a single Postgres database, it is possible to split the storage of graph-node's data across multiple Postgres databases. All databases together form the store of the graph-node instance. Each individual database is called a shard. -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. Sharding becomes useful when your existing database can't keep up with the load that Graph Node puts on it, and when it's not possible to increase the database size anymore. -> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between subgraphs; in those situations it can help dramatically if the high-volume subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume subgraphs. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. In terms of configuring connections, start with max_connections in postgresql.conf set to 400 (or maybe even 200) and look at the store_connection_wait_time_ms and store_connection_checkout_count Prometheus metrics. Noticeable wait times (anything above 5ms) is an indication that there are too few connections available; high wait times there will also be caused by the database being very busy (like high CPU load). However if the database seems otherwise stable, high wait times indicate a need to increase the number of connections. In the configuration, how many connections each graph-node instance can use is an upper limit, and Graph Node will not keep connections open if it doesn't need them. 
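The connection-pool guidance above can be checked against the metrics server rather than guessed at. A small sketch, assuming the default metrics port of 8040 and the metric names quoted in the text:

```bash
# Scrape the Prometheus endpoint (default port 8040) and inspect the pool metrics
# mentioned above; sustained waits above ~5ms suggest too few connections
# (or a database that is simply overloaded).
curl -s http://localhost:8040/metrics \
  | grep -E 'store_connection_wait_time_ms|store_connection_checkout_count'
```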
@@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### Supporting multiple networks -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Multiple networks - Multiple providers per network (this can allow splitting of load across providers, and can also allow for configuration of full nodes as well as archive nodes, with Graph Node preferring cheaper providers if a given workload allows). @@ -225,11 +225,11 @@ Users who are operating a scaled indexing setup with advanced configuration may ### Managing Graph Node -Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing subgraphs. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### Logging -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). @@ -247,11 +247,11 @@ The graphman command is included in the official containers, and you can docker Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` -### Working with subgraphs +### Working with Subgraphs #### Indexing status API -Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different subgraphs, checking proofs of indexing, inspecting subgraph features and more. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). @@ -263,7 +263,7 @@ There are three separate parts of the indexing process: - Processing events in order with the appropriate handlers (this can involve calling the chain for state, and fetching data from the store) - Writing the resulting data to the store -These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where subgraphs are slow to index, the underlying cause will depend on the specific subgraph. +These stages are pipelined (i.e. 
they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. Common causes of indexing slowness: @@ -276,24 +276,24 @@ Common causes of indexing slowness: - The provider itself falling behind the chain head - Slowness in fetching new receipts at the chain head from the provider -Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### Failed subgraphs +#### Failed Subgraphs -During indexing subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Deterministic failures: these are failures which will not be resolved with retries - Non-deterministic failures: these might be down to issues with the provider, or some unexpected Graph Node error. When a non-deterministic failure occurs, Graph Node will retry the failing handlers, backing off over time. -In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Block and call cache -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node caches certain data in the store in order to save refetching from the provider. 
Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. If a block cache inconsistency is suspected, such as a tx receipt missing event: @@ -304,7 +304,7 @@ If a block cache inconsistency is suspected, such as a tx receipt missing event: #### Querying issues and errors -Once a subgraph has been indexed, indexers can expect to serve queries via the subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users. @@ -316,7 +316,7 @@ Graph Node caches GraphQL queries by default, which can significantly reduce dat ##### Analysing queries -Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that subgraph or query. And then of course to resolve it, if possible. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. In other cases, the trigger might be high memory usage on a query node, in which case the challenge is first to identify the query causing the issue. @@ -336,10 +336,10 @@ In general, tables where the number of distinct entities are less than 1% of the Once a table has been determined to be account-like, running `graphman stats account-like .
<table>` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear <sgdNNN>.<table>
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### Removing subgraphs +#### Removing Subgraphs > This is new functionality, which will be available in Graph Node 0.29.x -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 70c56cbdbec11731b5057f8c7202760c8a1cabc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:46 -0500 Subject: [PATCH 0100/1789] New translations graph-node.mdx (Czech) --- .../pages/cs/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/cs/indexing/tooling/graph-node.mdx b/website/src/pages/cs/indexing/tooling/graph-node.mdx index 88ddb88813fb..3b71056d71f9 100644 --- a/website/src/pages/cs/indexing/tooling/graph-node.mdx +++ b/website/src/pages/cs/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: Uzel Graf --- -Graf Uzel je komponenta, která indexuje podgrafy a zpřístupňuje výsledná data k dotazování prostřednictvím rozhraní GraphQL API. Jako taková je ústředním prvkem zásobníku indexeru a její správná činnost je pro úspěšný provoz indexeru klíčová. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## Uzel Graf -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. 
Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### Databáze PostgreSQL -Hlavní úložiště pro uzel Graf Uzel, kde jsou uložena data podgrafů, metadata o podgraf a síťová data týkající se podgrafů, jako je bloková cache a cache eth_call. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### Síťoví klienti Aby mohl uzel Graph Node indexovat síť, potřebuje přístup k síťovému klientovi prostřednictvím rozhraní API JSON-RPC kompatibilního s EVM. Toto RPC se může připojit k jedinému klientovi nebo může jít o složitější nastavení, které vyrovnává zátěž mezi více klienty. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS uzly -Metadata nasazení podgrafů jsou uložena v síti IPFS. Uzel Graf přistupuje během nasazení podgrafu především k uzlu IPFS, aby načetl manifest podgrafu a všechny propojené soubory. Síťové indexery nemusí hostit vlastní uzel IPFS. Uzel IPFS pro síť je hostován na adrese https://ipfs.network.thegraph.com. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. ### Metrický server Prometheus @@ -77,19 +77,19 @@ A complete Kubernetes example configuration can be found in the [indexer reposit Když je Graf Uzel spuštěn, zpřístupňuje následující ports: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## Pokročilá konfigurace uzlu Graf -V nejjednodušším případě lze Graf Uzel provozovat s jednou instancí Graf Uzel, jednou databází PostgreSQL, uzlem IPFS a síťovými klienty podle potřeby indexovaných podgrafů. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. @@ -114,13 +114,13 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https: #### Více uzlů graf -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > Všimněte si, že více graf uzlů lze nakonfigurovat tak, aby používaly stejnou databázi, kterou lze horizontálně škálovat pomocí sharding. #### Pravidla nasazení -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
Příklad konfigurace pravidla nasazení: @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -167,11 +167,11 @@ Každý uzel, jehož --node-id odpovídá regulárnímu výrazu, bude nastaven t Pro většinu případů použití postačuje k podpoře instance graf uzlu jedna databáze Postgres. Pokud instance graf uzlu přeroste rámec jedné databáze Postgres, je možné rozdělit ukládání dat grafového uzlu do více databází Postgres. Všechny databáze dohromady tvoří úložiště instance graf uzlu. Každá jednotlivá databáze se nazývá shard. -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. Sharding se stává užitečným, když vaše stávající databáze nedokáže udržet krok se zátěží, kterou na ni Graf Uzel vyvíjí, a když už není možné zvětšit velikost databáze. -> Obecně je lepší vytvořit jednu co největší databázi, než začít s oddíly. Jednou z výjimek jsou případy, kdy je provoz dotazů rozdělen velmi nerovnoměrně mezi dílčí podgrafy; v těchto situacích může výrazně pomoci, pokud jsou dílčí podgrafy s velkým objemem uchovávány v jednom shardu a vše ostatní v jiném, protože toto nastavení zvyšuje pravděpodobnost, že data pro dílčí podgrafu s velkým objemem zůstanou v interní cache db a nebudou nahrazena daty, která nejsou tolik potřebná z dílčích podgrafů s malým objemem. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. Pokud jde o konfiguraci připojení, začněte s max_connections v souboru postgresql.conf nastaveným na 400 (nebo možná dokonce 200) a podívejte se na metriky store_connection_wait_time_ms a store_connection_checkout_count Prometheus. Výrazné čekací doby (cokoli nad 5 ms) jsou známkou toho, že je k dispozici příliš málo připojení; vysoké čekací doby tam budou také způsobeny tím, že databáze je velmi vytížená (například vysoké zatížení procesoru). Pokud se však databáze jinak jeví jako stabilní, vysoké čekací doby naznačují potřebu zvýšit počet připojení. V konfiguraci je horní hranicí, kolik připojení může každá instance graf uzlu používat, a graf uzel nebude udržovat otevřená připojení, pokud je nepotřebuje. 
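As a rough illustration of the sharding and connection-pool settings discussed above, a minimal `[store]` section of `config.toml` might look like the sketch below. The shard and replica names (`shard_a`, `repl1`), hostnames, credentials and `pool_size` values are placeholders chosen for this example, not recommendations; consult the Graph Node configuration docs for the authoritative key set.

```toml
[store]
[store.primary]
# Subgraph metadata, and any deployment not routed elsewhere, lives in the primary shard.
connection = "postgresql://graph:PASSWORD@primary-db:5432/graph"
pool_size = 400

[store.primary.replicas.repl1]
# Optional read replica of the primary, used to spread query load.
connection = "postgresql://graph:PASSWORD@primary-replica:5432/graph"
weight = 1

[store.shard_a]
# An additional shard that deployment rules can target, e.g. for high-volume Subgraphs.
connection = "postgresql://graph:PASSWORD@shard-a-db:5432/graph"
pool_size = 400
```

Note that `pool_size` is an upper bound per Graph Node instance, so when several instances share a database the totals still need to fit within the `max_connections` setting discussed above.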
@@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### Podpora více sítí -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Více sítí - Více poskytovatelů na síť (to může umožnit rozdělení zátěže mezi poskytovatele a také konfiguraci plných uzlů i archivních uzlů, přičemž Graph Node může preferovat levnější poskytovatele, pokud to daná pracovní zátěž umožňuje). @@ -225,11 +225,11 @@ Uživatelé, kteří provozují škálované nastavení indexování s pokročil ### Správa uzlu graf -Vzhledem k běžícímu uzlu Graf (nebo uzlům Graf Uzel!) je pak úkolem spravovat rozmístěné podgrafy v těchto uzlech. Graf Uzel nabízí řadu nástrojů, které pomáhají se správou podgrafů. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### Protokolování -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). @@ -247,11 +247,11 @@ The graphman command is included in the official containers, and you can docker Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` -### Práce s podgrafy +### Working with Subgraphs #### Stav indexování API -API pro stav indexování, které je ve výchozím nastavení dostupné na portu 8030/graphql, nabízí řadu metod pro kontrolu stavu indexování pro různé podgrafy, kontrolu důkazů indexování, kontrolu vlastností podgrafů a další. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). @@ -263,7 +263,7 @@ Proces indexování má tři samostatné části: - Zpracování událostí v pořadí pomocí příslušných obslužných (to může zahrnovat volání řetězce pro zjištění stavu a načtení dat z úložiště) - Zápis výsledných dat do úložiště -Tyto fáze jsou spojeny do potrubí (tj. mohou být prováděny paralelně), ale jsou na sobě závislé. Pokud se podgrafy indexují pomalu, bude příčina záviset na konkrétním podgrafu. +These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. 
Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. Běžné příčiny pomalého indexování: @@ -276,24 +276,24 @@ Běžné příčiny pomalého indexování: - Samotný poskytovatel se dostává za hlavu řetězu - Pomalé načítání nových účtenek od poskytovatele v hlavě řetězce -Metriky indexování podgrafů mohou pomoci diagnostikovat hlavní příčinu pomalého indexování. V některých případech spočívá problém v samotném podgrafu, ale v jiných případech mohou zlepšení síťových poskytovatelů, snížení konfliktů v databázi a další zlepšení konfigurace výrazně zlepšit výkon indexování. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### Neúspěšné podgrafy +#### Failed Subgraphs -Během indexování mohou dílčí graf selhat, pokud narazí na neočekávaná data, pokud některá komponenta nefunguje podle očekávání nebo pokud je chyba ve zpracovatelích událostí nebo v konfiguraci. Existují dva obecné typy selhání: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Deterministická selhání: jedná se o selhání, která nebudou vyřešena opakovanými pokusy - Nedeterministická selhání: mohou být způsobena problémy se zprostředkovatelem nebo neočekávanou chybou grafického uzlu. Pokud dojde k nedeterministickému selhání, uzel Graf zopakuje selhání obsluhy a postupně se vrátí zpět. -V některých případech může být chyba řešitelná indexátorem (například pokud je chyba důsledkem toho, že není k dispozici správný typ zprostředkovatele, přidání požadovaného zprostředkovatele umožní pokračovat v indexování). V jiných případech je však nutná změna v kódu podgrafu. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Bloková a volací mezipaměť -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node caches certain data in the store in order to save refetching from the provider. 
Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. Pokud existuje podezření na nekonzistenci blokové mezipaměti, například chybějící událost tx receipt: @@ -304,7 +304,7 @@ Pokud existuje podezření na nekonzistenci blokové mezipaměti, například ch #### Problémy a chyby při dotazování -Jakmile je podgraf indexován, lze očekávat, že indexery budou obsluhovat dotazy prostřednictvím koncového bodu vyhrazeného pro dotazy podgrafu. Pokud indexátor doufá, že bude obsluhovat značný objem dotazů, doporučuje se použít vyhrazený uzel pro dotazy a v případě velmi vysokého objemu dotazů mohou indexátory chtít nakonfigurovat oddíly replik tak, aby dotazy neovlivňovaly proces indexování. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. I s vyhrazeným dotazovacím uzlem a replikami však může provádění některých dotazů trvat dlouho a v některých případech může zvýšit využití paměti a negativně ovlivnit dobu dotazování ostatních uživatelů. @@ -316,7 +316,7 @@ Graph Node caches GraphQL queries by default, which can significantly reduce dat ##### Analýza dotazů -Problematické dotazy se nejčastěji objevují jedním ze dvou způsobů. V některých případech uživatelé sami hlásí, že daný dotaz je pomalý. V takovém případě je úkolem diagnostikovat příčinu pomalosti - zda se jedná o obecný problém, nebo o specifický problém daného podgrafu či dotazu. A pak ho samozřejmě vyřešit, pokud je to možné. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. V jiných případech může být spouštěcím faktorem vysoké využití paměti v uzlu dotazu a v takovém případě je třeba nejprve identifikovat dotaz, který problém způsobuje. @@ -336,10 +336,10 @@ In general, tables where the number of distinct entities are less than 1% of the Once a table has been determined to be account-like, running `graphman stats account-like .
` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### Odstranění podgrafů +#### Removing Subgraphs > Jedná se o novou funkci, která bude k dispozici v uzlu Graf 0.29.x -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 037d07f175701dbd515835a30aa9a1475f2e3c8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:47 -0500 Subject: [PATCH 0101/1789] New translations graph-node.mdx (German) --- .../pages/de/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/de/indexing/tooling/graph-node.mdx b/website/src/pages/de/indexing/tooling/graph-node.mdx index ad1242d7c2b7..1aced4bdab55 100644 --- a/website/src/pages/de/indexing/tooling/graph-node.mdx +++ b/website/src/pages/de/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: Graph Node --- -Graph Node ist die Komponente, die Subgrafen indiziert und die resultierenden Daten zur Abfrage über eine GraphQL-API verfügbar macht. Als solches ist es für den Indexer-Stack von zentraler Bedeutung, und der korrekte Betrieb des Graph-Knotens ist entscheidend für den Betrieb eines erfolgreichen Indexers. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. 
+[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL-Datenbank -Der Hauptspeicher für den Graph-Knoten, hier werden Subgraf-Daten sowie Metadaten zu Subgrafen und Subgraf-unabhängige Netzwerkdaten wie Block-Cache und eth_call-Cache gespeichert. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### Netzwerk-Clients In order to index a network, Graph Node needs access to a network client via an EVM-compatible JSON-RPC API. This RPC may connect to a single client or it could be a more complex setup that load balances across multiple. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS-Knoten -Subgraf-Bereitstellungsmetadaten werden im IPFS-Netzwerk gespeichert. Der Graph-Knoten greift hauptsächlich während der Subgraf-Bereitstellung auf den IPFS-Knoten zu, um das Subgraf-Manifest und alle verknüpften Dateien abzurufen. Netzwerk-Indexierer müssen keinen eigenen IPFS-Knoten hosten, ein IPFS-Knoten für das Netzwerk wird unter https://ipfs.network.thegraph.com gehostet. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. 
### Prometheus-Metrikserver @@ -77,19 +77,19 @@ A complete Kubernetes example configuration can be found in the [indexer reposit Wenn es ausgeführt wird, stellt Graph Node die folgenden Ports zur Verfügung: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## Erweiterte Graph-Knoten-Konfiguration -In seiner einfachsten Form kann Graph Node mit einer einzelnen Instanz von Graph Node, einer einzelnen PostgreSQL-Datenbank, einem IPFS-Knoten und den Netzwerk-Clients betrieben werden, die von den zu indizierenden Subgrafen benötigt werden. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. @@ -114,13 +114,13 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https: #### Mehrere Graph-Knoten -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > Beachten Sie darauf, dass mehrere Graph-Knoten so konfiguriert werden können, dass sie dieselbe Datenbank verwenden, die ihrerseits durch Sharding horizontal skaliert werden kann. #### Bereitstellungsregeln -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
Beispielkonfiguration für Bereitstellungsregeln: @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -167,11 +167,11 @@ Jeder Knoten, dessen --node-id mit dem regulären Ausdruck übereinstimmt, wird Für die meisten Anwendungsfälle reicht eine einzelne Postgres-Datenbank aus, um eine Graph-Node-Instanz zu unterstützen. Wenn eine Graph-Node-Instanz aus einer einzelnen Postgres-Datenbank herauswächst, ist es möglich, die Speicherung der Daten des Graph-Nodes auf mehrere Postgres-Datenbanken aufzuteilen. Alle Datenbanken zusammen bilden den Speicher der Graph-Node-Instanz. Jede einzelne Datenbank wird als Shard bezeichnet. -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. Sharding wird nützlich, wenn Ihre vorhandene Datenbank nicht mit der Last Schritt halten kann, die Graph Node ihr auferlegt, und wenn es nicht mehr möglich ist, die Datenbankgröße zu erhöhen. -> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between subgraphs; in those situations it can help dramatically if the high-volume subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume subgraphs. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. Was das Konfigurieren von Verbindungen betrifft, beginnen Sie mit max_connections in postgresql.conf, das auf 400 (oder vielleicht sogar 200) eingestellt ist, und sehen Sie sich die Prometheus-Metriken store_connection_wait_time_ms und store_connection_checkout_count an. Spürbare Wartezeiten (alles über 5 ms) sind ein Hinweis darauf, dass zu wenige Verbindungen verfügbar sind; hohe Wartezeiten werden auch dadurch verursacht, dass die Datenbank sehr ausgelastet ist (z. B. hohe CPU-Last). Wenn die Datenbank jedoch ansonsten stabil erscheint, weisen hohe Wartezeiten darauf hin, dass die Anzahl der Verbindungen erhöht werden muss. 
In der Konfiguration ist die Anzahl der Verbindungen, die jede Graph-Knoten-Instanz verwenden kann, eine Obergrenze, und der Graph-Knoten hält Verbindungen nicht offen, wenn er sie nicht benötigt. @@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### Unterstützung mehrerer Netzwerke -Das Graph-Protokoll erhöht die Anzahl der Netzwerke, die für die Indizierung von Belohnungen unterstützt werden, und es gibt viele Subgraphen, die nicht unterstützte Netzwerke indizieren, die ein Indexer verarbeiten möchte. Die Datei `config.toml` ermöglicht eine ausdrucksstarke und flexible Konfiguration von: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Mehrere Netzwerke - Mehrere Anbieter pro Netzwerk (dies kann eine Aufteilung der Last auf Anbieter ermöglichen und kann auch die Konfiguration von vollständigen Knoten sowie Archivknoten ermöglichen, wobei Graph Node günstigere Anbieter bevorzugt, wenn eine bestimmte Arbeitslast dies zulässt). @@ -225,11 +225,11 @@ Benutzer, die ein skaliertes Indizierungs-Setup mit erweiterter Konfiguration be ### Managing Graph Node -Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing subgraphs. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### Protokollierung -Die Protokolle von Graph Node können nützliche Informationen für die Debuggen und Optimierung von Graph Node und bestimmten Subgraphen liefern. Graph Node unterstützt verschiedene Log-Ebenen über die Umgebungsvariable `GRAPH_LOG`, mit den folgenden Ebenen: Fehler, Warnung, Info, Debug oder Trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. Wenn Sie außerdem `GRAPH_LOG_QUERY_TIMING` auf `gql` setzen, erhalten Sie mehr Details darüber, wie GraphQL-Abfragen ausgeführt werden (allerdings wird dadurch eine große Menge an Protokollen erzeugt). @@ -247,11 +247,11 @@ Der Befehl graphman ist in den offiziellen Containern enthalten, und Sie können Eine vollständige Dokumentation der `graphman`-Befehle ist im Graph Node Repository verfügbar. Siehe [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) im Graph Node `/docs` -### Working with subgraphs +### Working with Subgraphs #### Indizierungsstatus-API -Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different subgraphs, checking proofs of indexing, inspecting subgraph features and more. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. Das vollständige Schema ist [hier] verfügbar (https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). 
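For illustration, one way to exercise the indexing status API described above is to POST a GraphQL query to the default port 8030. The field names below are taken from the linked schema, but treat this as a sketch and check the schema itself for the full set of available fields.

```bash
# Query indexing status for all deployments on a local index node (default port 8030).
curl -s http://localhost:8030/graphql \
  -H 'Content-Type: application/json' \
  -d '{"query": "{ indexingStatuses { subgraph health synced fatalError { message } chains { network chainHeadBlock { number } latestBlock { number } } } }"}'
```

Comparing `latestBlock` with `chainHeadBlock` per chain gives a quick view of how far behind a given deployment is.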
@@ -263,7 +263,7 @@ There are three separate parts of the indexing process: - Processing events in order with the appropriate handlers (this can involve calling the chain for state, and fetching data from the store) - Writing the resulting data to the store -These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where subgraphs are slow to index, the underlying cause will depend on the specific subgraph. +These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. Common causes of indexing slowness: @@ -276,24 +276,24 @@ Common causes of indexing slowness: - The provider itself falling behind the chain head - Slowness in fetching new receipts at the chain head from the provider -Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### Failed subgraphs +#### Failed Subgraphs -During indexing subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Deterministic failures: these are failures which will not be resolved with retries - Non-deterministic failures: these might be down to issues with the provider, or some unexpected Graph Node error. When a non-deterministic failure occurs, Graph Node will retry the failing handlers, backing off over time. -In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministische Fehler werden als „endgültig“ betrachtet, wobei für den fehlgeschlagenen Block ein Indizierungsnachweis generiert wird, während nicht-deterministische Fehler nicht als solche betrachtet werden, da es dem Subgraph gelingen kann, „auszufallen“ und die Indizierung fortzusetzen. In einigen Fällen ist das nicht-deterministische Label falsch und der Subgraph wird den Fehler nie überwinden; solche Fehler sollten als Probleme im Graph Node Repository gemeldet werden. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. 
In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Block and call cache -Graph Node speichert bestimmte Daten im Zwischenspeicher, um ein erneutes Abrufen vom Anbieter zu vermeiden. Blöcke werden zwischengespeichert, ebenso wie die Ergebnisse von `eth_calls` (letztere werden ab einem bestimmten Block zwischengespeichert). Diese Zwischenspeicherung kann die Indizierungsgeschwindigkeit bei der „Neusynchronisierung“ eines geringfügig veränderten Subgraphen drastisch erhöhen. +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -Wenn jedoch ein Ethereum-Knoten über einen bestimmten Zeitraum falsche Daten geliefert hat, können diese in den Cache gelangen und zu falschen Daten oder fehlgeschlagenen Subgraphen führen. In diesem Fall können Indexer `graphman` verwenden, um den vergifteten Cache zu löschen und dann die betroffenen Subgraphen zurückzuspulen, die dann frische Daten von dem (hoffentlich) gesunden Anbieter abrufen. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. If a block cache inconsistency is suspected, such as a tx receipt missing event: @@ -304,7 +304,7 @@ If a block cache inconsistency is suspected, such as a tx receipt missing event: #### Querying issues and errors -Once a subgraph has been indexed, indexers can expect to serve queries via the subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users. @@ -316,7 +316,7 @@ Graph Node zwischenspeichert GraphQL-Abfragen standardmäßig, was die Datenbank ##### Analysing queries -Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that subgraph or query. And then of course to resolve it, if possible. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. 
In other cases, the trigger might be high memory usage on a query node, in which case the challenge is first to identify the query causing the issue. @@ -336,10 +336,10 @@ Im Allgemeinen sind Tabellen, bei denen die Anzahl der unterschiedlichen Entitä Sobald eine Tabelle als „kontoähnlich“ eingestuft wurde, wird durch die Ausführung von `graphman stats account-like .
` die kontoähnliche Optimierung für Abfragen auf diese Tabelle aktiviert. Die Optimierung kann mit `graphman stats account-like --clear .
` wieder ausgeschaltet werden. Es dauert bis zu 5 Minuten, bis die Abfrageknoten merken, dass die Optimierung ein- oder ausgeschaltet wurde. Nach dem Einschalten der Optimierung muss überprüft werden, ob die Abfragen für diese Tabelle durch die Änderung nicht tatsächlich langsamer werden. Wenn Sie Grafana für die Überwachung von Postgres konfiguriert haben, würden langsame Abfragen in `pg_stat_activity` in großer Zahl angezeigt werden und mehrere Sekunden dauern. In diesem Fall muss die Optimierung wieder abgeschaltet werden. -Bei Uniswap-ähnlichen Subgraphen sind die `pair`- und `token`-Tabellen die Hauptkandidaten für diese Optimierung und können die Datenbankauslastung erheblich beeinflussen. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### Removing subgraphs +#### Removing Subgraphs > This is new functionality, which will be available in Graph Node 0.29.x -Irgendwann möchte ein Indexer vielleicht einen bestimmten Subgraph entfernen. Das kann einfach mit `graphman drop` gemacht werden, das einen Einsatz und alle indizierten Daten löscht. Der Einsatz kann entweder als Subgraph-Name, als IPFS-Hash `Qm..` oder als Datenbank-Namensraum `sgdNNN` angegeben werden. Weitere Dokumentation ist [hier](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop) verfügbar. +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 270a6487bdf635c4d0a66dd85f9b148cdd95516a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:48 -0500 Subject: [PATCH 0102/1789] New translations graph-node.mdx (Italian) --- .../pages/it/indexing/tooling/graph-node.mdx | 84 +++++++++---------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/website/src/pages/it/indexing/tooling/graph-node.mdx b/website/src/pages/it/indexing/tooling/graph-node.mdx index b77c651c0bd2..3fef49ce3bf5 100644 --- a/website/src/pages/it/indexing/tooling/graph-node.mdx +++ b/website/src/pages/it/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: Graph Node --- -Graph Node è il componente che indica i subgraph e rende i dati risultanti disponibili per l'interrogazione tramite API GraphQL. È quindi centrale per lo stack degli indexer, ed inoltre il corretto funzionamento di Graph Node è cruciale per il buon funzionamento di un indexer di successo. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. 
+[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### Database PostgreSQL -È l'archivio principale del Graph Node, in cui vengono memorizzati i dati dei subgraph, i metadati sui subgraph e i dati di rete che non dipendono dal subgraph, come la cache dei blocchi e la cache eth_call. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### Clienti della rete Per indicizzare una rete, Graph Node deve accedere a un cliente di rete tramite un'API JSON-RPC compatibile con EVM. Questo RPC può connettersi a un singolo cliente o può essere una configurazione più complessa che bilancia il carico su più clienti. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### Nodi IPFS -I metadati di distribuzione del subgraph sono memorizzati sulla rete IPFS. The Graph Node accede principalmente al nodo IPFS durante la distribuzione del subgraph per recuperare il manifest del subgraph e tutti i file collegati. Gli indexer di rete non devono ospitare un proprio nodo IPFS. Un nodo IPFS per la rete è ospitato su https://ipfs.network.thegraph.com. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. 
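As an aside, assuming the hosted node above exposes the standard IPFS HTTP API (worth verifying for your setup), a deployment's manifest can be inspected directly from it; the `QmYourDeploymentHash` value below is a placeholder for a real deployment ID.

```bash
# Fetch a Subgraph manifest from the network's IPFS node (placeholder hash).
curl -s -X POST "https://ipfs.network.thegraph.com/api/v0/cat?arg=QmYourDeploymentHash"
```

This mirrors the lookup Graph Node performs at deployment time, so it can serve as a quick sanity check that the manifest and linked files a Subgraph references are actually retrievable.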
### Server di metriche Prometheus @@ -77,19 +77,19 @@ A complete Kubernetes example configuration can be found in the [indexer reposit Quando è in funzione, Graph Node espone le seguenti porte: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## Configurazione avanzata del Graph Node -Nella sua forma più semplice, Graph Node può essere utilizzato con una singola istanza di Graph Node, un singolo database PostgreSQL, un nodo IPFS e i client di rete richiesti dai subgraph da indicizzare. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. @@ -114,39 +114,39 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https: #### Graph Node multipli -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > Si noti che più Graph Node possono essere configurati per utilizzare lo stesso database, che può essere scalato orizzontalmente tramite sharding. #### Regole di distribuzione -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
Esempio di configurazione della regola di distribuzione: ```toml [deployment] [[deployment.rule]] -match = { name = "(vip|importante)/.*" } +match = { name = "(vip|important)/.*" } shard = "vip" indexers = [ "index_node_vip_0", "index_node_vip_1" ] [[deployment.rule]] match = { network = "kovan" } -# Nessun shard, quindi usiamo lo shard predefinito chiamato "primario". -indicizzatori = [ "index_node_kovan_0" ] +# No shard, so we use the default shard called 'primary' +indexers = [ "index_node_kovan_0" ] [[deployment.rule]] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# Non c'è nessun "match", quindi qualsiasi sottografo corrisponde -shard = [ "sharda", "shardb" ] -indicizzatori = [ +# There's no 'match', so any Subgraph matches +shards = [ "sharda", "shardb" ] +indexers = [ "index_node_community_0", "index_node_community_1", "index_node_community_2", "index_node_community_3", "index_node_community_4", - "indice_nodo_comunità_5" + "index_node_community_5" ] ``` @@ -167,11 +167,11 @@ Ogni nodo il cui --node-id corrisponde all'espressione regolare sarà impostato Per la maggior parte dei casi d'uso, un singolo database Postgres è sufficiente per supportare un'istanza del graph-node. Quando un'istanza del graph-node supera un singolo database Postgres, è possibile suddividere l'archiviazione dei dati del graph-node su più database Postgres. Tutti i database insieme formano lo store dell'istanza del graph-node. Ogni singolo database è chiamato shard. -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. Lo sharding diventa utile quando il database esistente non riesce a reggere il carico che Graph Node gli impone e quando non è più possibile aumentare le dimensioni del database. -> In genere è meglio creare un singolo database il più grande possibile, prima di iniziare con gli shard. Un'eccezione è rappresentata dai casi in cui il traffico di query è suddiviso in modo molto disomogeneo tra i subgraph; in queste situazioni può essere di grande aiuto tenere i subgraph ad alto volume in uno shard e tutto il resto in un altro, perché questa configurazione rende più probabile che i dati per i subgraph ad alto volume rimangano nella cache interna del database e non vengano sostituiti da dati non necessari per i subgraph a basso volume. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. 
Per quanto riguarda la configurazione delle connessioni, iniziare con max_connections in postgresql.conf impostato a 400 (o forse anche a 200) e osservare le metriche di Prometheus store_connection_wait_time_ms e store_connection_checkout_count. Tempi di attesa notevoli (qualsiasi cosa superiore a 5 ms) indicano che le connessioni disponibili sono troppo poche; tempi di attesa elevati possono anche essere causati da un database molto occupato (come un elevato carico della CPU). Tuttavia, se il database sembra altrimenti stabile, tempi di attesa elevati indicano la necessità di aumentare il numero di connessioni. Nella configurazione, il numero di connessioni che ogni istanza del graph-node può utilizzare è un limite massimo e Graph Node non manterrà aperte le connessioni se non ne ha bisogno. @@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### Supporto di più reti -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Reti multiple - Fornitori multipli per rete (questo può consentire di suddividere il carico tra i fornitori e di configurare nodi completi e nodi di archivio, con Graph Node che preferisce i fornitori più economici se un determinato carico di lavoro lo consente). @@ -225,11 +225,11 @@ Gli utenti che gestiscono una configurazione di indicizzazione scalare con una c ### Gestione del Graph Node -Dato un Graph Node (o più Graph Nodes!) in funzione, la sfida consiste nel gestire i subgraph distribuiti tra i nodi. Graph Node offre una serie di strumenti che aiutano a gestire i subgraph. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### Logging -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). @@ -247,11 +247,11 @@ The graphman command is included in the official containers, and you can docker Full documentation of `graphman` commands is available in the Graph Node repository. 
See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` -### Lavorare con i subgraph +### Working with Subgraphs #### Stato dell'indicizzazione API -Disponibile sulla porta 8030/graphql per impostazione predefinita, l'API dello stato di indicizzazione espone una serie di metodi per verificare lo stato di indicizzazione di diversi subgraph, controllare le prove di indicizzazione, ispezionare le caratteristiche dei subgraph e altro ancora. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). @@ -263,7 +263,7 @@ Il processo di indicizzazione si articola in tre parti distinte: - Elaborare gli eventi in ordine con i gestori appropriati (questo può comportare la chiamata alla chain per lo stato e il recupero dei dati dall'archivio) - Scrivere i dati risultanti nell'archivio -Questi stadi sono collegati tra loro (cioè possono essere eseguiti in parallelo), ma dipendono l'uno dall'altro. Se i subgraph sono lenti da indicizzare, la causa dipende dal subgraph specifico. +These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. Cause comuni di lentezza dell'indicizzazione: @@ -276,24 +276,24 @@ Cause comuni di lentezza dell'indicizzazione: - Il fornitore stesso è in ritardo rispetto alla testa della chain - Lentezza nell'acquisizione di nuove ricevute dal fornitore alla testa della chain -Le metriche di indicizzazione dei subgraph possono aiutare a diagnosticare la causa principale della lentezza dell'indicizzazione. In alcuni casi, il problema risiede nel subgraph stesso, ma in altri, il miglioramento dei provider di rete, la riduzione della contesa del database e altri miglioramenti della configurazione possono migliorare notevolmente le prestazioni dell'indicizzazione. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### I subgraph falliti +#### Failed Subgraphs -Durante l'indicizzazione, i subgraph possono fallire se incontrano dati inaspettati, se qualche componente non funziona come previsto o se c'è un bug nei gestori di eventi o nella configurazione. Esistono due tipi generali di errore: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Guasti deterministici: si tratta di guasti che non possono essere risolti con tentativi di risposta - Fallimenti non deterministici: potrebbero essere dovuti a problemi con il provider o a qualche errore imprevisto di Graph Node. Quando si verifica un errore non deterministico, Graph Node riprova i gestori che non hanno funzionato, riducendo il tempo a disposizione. 
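The indexing status API on port 8030, described above, is usually the quickest way to see whether a deployment has failed and whether the failure was flagged as deterministic. A hedged sketch using plain curl — the field names are assumed from the index-node schema linked earlier, so adjust them if your version differs:

```bash
# Ask the index node which deployments are unhealthy and why.
# Field names are assumed from the index-node GraphQL schema; adjust as needed.
curl -s -X POST http://localhost:8030/graphql \
  -H 'Content-Type: application/json' \
  -d '{"query":"{ indexingStatuses { subgraph synced health fatalError { message deterministic } } }"}'
```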
-In alcuni casi, un errore può essere risolto dall'indexer (ad esempio, se l'errore è dovuto alla mancanza del tipo di provider giusto, l'aggiunta del provider richiesto consentirà di continuare l'indicizzazione). In altri casi, invece, è necessario modificare il codice del subgraph. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Cache dei blocchi e delle chiamate -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. Se si sospetta un'incongruenza nella cache a blocchi, come ad esempio un evento di ricezione tx mancante: @@ -304,7 +304,7 @@ Se si sospetta un'incongruenza nella cache a blocchi, come ad esempio un evento #### Problemi ed errori di query -Una volta che un subgraph è stato indicizzato, gli indexer possono aspettarsi di servire le query attraverso l'endpoint di query dedicato al subgraph. Se l'indexer spera di servire un volume significativo di query, è consigliabile un nodo di query dedicato; in caso di volumi di query molto elevati, gli indexer potrebbero voler configurare shard di replica in modo che le query non abbiano un impatto sul processo di indicizzazione. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. 
If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. Tuttavia, anche con un nodo di query dedicato e le repliche, alcune query possono richiedere molto tempo per essere eseguite e, in alcuni casi, aumentare l'utilizzo della memoria e avere un impatto negativo sul tempo di query per gli altri utenti. @@ -316,7 +316,7 @@ Graph Node caches GraphQL queries by default, which can significantly reduce dat ##### Analisi delle query -Le query problematiche emergono spesso in due modi. In alcuni casi, sono gli stessi utenti a segnalare la lentezza di una determinata query. In questo caso, la sfida consiste nel diagnosticare la ragione della lentezza, sia che si tratti di un problema generale, sia che si tratti di un problema specifico di quel subgraph o di quella query. E poi, naturalmente, risolverlo, se possibile. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. In altri casi, il fattore scatenante potrebbe essere l'elevato utilizzo della memoria su un nodo di query, nel qual caso la sfida consiste nell'identificare la query che causa il problema. @@ -336,10 +336,10 @@ In general, tables where the number of distinct entities are less than 1% of the Once a table has been determined to be account-like, running `graphman stats account-like .
` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### Rimozione dei subgraph +#### Removing Subgraphs > Si tratta di una nuova funzionalità, che sarà disponibile in Graph Node 0.29.x -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 7ea02e84b637153a4f6a0f1a576510b0a558197b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:49 -0500 Subject: [PATCH 0103/1789] New translations graph-node.mdx (Japanese) --- .../pages/ja/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/ja/indexing/tooling/graph-node.mdx b/website/src/pages/ja/indexing/tooling/graph-node.mdx index 604095157886..332b7fd79baf 100644 --- a/website/src/pages/ja/indexing/tooling/graph-node.mdx +++ b/website/src/pages/ja/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: グラフノード --- -グラフノードはサブグラフのインデックスを作成し、得られたデータをGraphQL API経由でクエリできるようにするコンポーネントです。そのため、インデクサースタックの中心的存在であり、グラフノードの正しい動作はインデクサーを成功させるために非常に重要です。 +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## グラフノード -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. 
This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQLデータベース -グラフノードのメインストアで、サブグラフデータ、サブグラフに関するメタデータ、ブロックキャッシュやeth_callキャッシュなどのサブグラフに依存しないネットワークデータが格納されます。 +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### ネットワーククライアント ネットワークにインデックスを付けるために、グラフ ノードは EVM 互換の JSON-RPC API を介してネットワーク クライアントにアクセスする必要があります。この RPC は単一のクライアントに接続する場合もあれば、複数のクライアントに負荷を分散するより複雑なセットアップになる場合もあります。 -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFSノード -IPFS ノード(バージョン 未満) - サブグラフのデプロイメタデータは IPFS ネットワーク上に保存されます。 グラフノードは、サブグラフのデプロイ時に主に IPFS ノードにアクセスし、サブグラフマニフェストと全てのリンクファイルを取得します。 ネットワーク・インデクサーは独自の IPFS ノードをホストする必要はありません。 ネットワーク用の IPFS ノードは、https://ipfs.network.thegraph.com でホストされています。 +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. ### Prometheus メトリクスサーバー @@ -77,19 +77,19 @@ A complete Kubernetes example configuration can be found in the [indexer reposit グラフノードは起動時に以下のポートを公開します。 -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## グラフノードの高度な設定 -最も単純な場合、Graph Node は、Graph Node の単一のインスタンス、単一の PostgreSQL データベース、IPFS ノード、およびサブグラフのインデックス作成に必要なネットワーク クライアントで操作できます。 +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. @@ -114,13 +114,13 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https: #### 複数のグラフノード -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > なお、複数のGraph Nodeはすべて同じデータベースを使用するように設定することができ、Shardingによって水平方向に拡張することができます。 #### デプロイメントルール -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
デプロイメントルールの設定例: @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -167,11 +167,11 @@ query = "" ほとんどの場合、1つのPostgresデータベースでグラフノードインスタンスをサポートするのに十分です。グラフノードインスタンスが1つのPostgresデータベースを使い切った場合、グラフノードデータを複数のPostgresデータベースに分割して保存することが可能です。全てのデータベースが一緒になってグラフノードインスタンスのストアを形成します。個々のデータベースはシャード(shard)と呼ばれます。 -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. グラフノードの負荷に既存のデータベースが追いつかず、これ以上データベースサイズを大きくすることができない場合に、シャーディングが有効になります。 -> 一般的には、シャードを作成する前に、単一のデータベースを可能な限り大きくすることをお勧めします。例外は、クエリのトラフィックがサブグラフ間で非常に不均一に分割される場合です。このような状況では、ボリュームの大きいサブグラフを1つのシャードに、それ以外を別のシャードに保存すると劇的に効果があります。この設定により、ボリュームの大きいサブグラフのデータがdb内部キャッシュに残り、ボリュームの小さいサブグラフからそれほど必要とされていないデータに置き換えられる可能性が少なくなるためです。 +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. 接続の設定に関しては、まずpostgresql.confのmax_connectionsを400(あるいは200)に設定し、store_connection_wait_time_msとstore_connection_checkout_count Prometheusメトリクスを見てみてください。顕著な待ち時間(5ms以上)は、利用可能な接続が少なすぎることを示しています。高い待ち時間は、データベースが非常に忙しいこと(CPU負荷が高いなど)によっても引き起こされます。しかし、データベースが安定しているようであれば、待ち時間が長いのは接続数を増やす必要があることを示しています。設定上、各グラフノードインスタンスが使用できるコネクション数は上限であり、グラフノードは必要ないコネクションはオープンにしておきません。 @@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### 複数のネットワークに対応 -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - 複数のネットワーク - ネットワークごとに複数のプロバイダ(プロバイダ間で負荷を分割することができ、また、フルノードとアーカイブノードを構成することができ、作業負荷が許す限り、Graph Nodeはより安価なプロバイダを優先することができます)。 @@ -225,11 +225,11 @@ Graph Node supports a range of environment variables which can enable features, ### グラフノードの管理 -グラフノードが動作している場合、それらのノードに展開されたサブグラフを管理することが課題となります。グラフノードは、サブグラフを管理するための様々なツールを提供します。 +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. 
Graph Node surfaces a range of tools to help with managing Subgraphs. #### ロギング -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). @@ -247,11 +247,11 @@ The graphman command is included in the official containers, and you can docker Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` -### サブグラフの操作 +### Working with Subgraphs #### インデックスステータスAPI -デフォルトではポート8030/graphqlで利用可能なindexing status APIは、異なるサブグラフのindexing statusのチェック、indexing proofのチェック、サブグラフの特徴の検査など、様々なメソッドを公開しています。 +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). @@ -263,7 +263,7 @@ The full schema is available [here](https://github.com/graphprotocol/graph-node/ - 適切なハンドラで順番にイベントを処理する(これには、状態のためにチェーンを呼び出したり、ストアからデータを取得したりすることが含まれます)。 - 出来上がったデータをストアに書き込む -これらのステージはパイプライン化されていますが(つまり、並列に実行することができます)、互いに依存し合っています。サブグラフのインデックス作成に時間がかかる場合、その根本的な原因は、特定のサブグラフに依存します。 +These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. インデックス作成が遅くなる一般的な原因: @@ -276,24 +276,24 @@ The full schema is available [here](https://github.com/graphprotocol/graph-node/ - プロバイダー自体がチェーンヘッドに遅れる場合 - チェーンヘッドでプロバイダーから新しいレシートを取得する際の遅延 -サブグラフのインデックス作成指標は、インデックス作成の遅さの根本的な原因を診断するのに役立ちます。あるケースでは、問題はサブグラフ自体にありますが、他のケースでは、ネットワークプロバイダーの改善、データベースの競合の減少、その他の構成の改善により、インデックス作成性能を著しく向上させることができます。 +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### 失敗したサブグラフ +#### Failed Subgraphs -インデックス作成中、サブグラフは予期しないデータに遭遇したり、あるコンポーネントが期待通りに動作しなかったり、イベントハンドラや設定に何らかのバグがあったりすると、失敗することがあります。失敗には一般に2つのタイプがあります。 +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. 
There are two general types of failure: - 決定論的失敗:再試行では解決できない失敗 - 非決定論的失敗:プロバイダの問題や、予期しないグラフノードのエラーに起因する可能性があります。非決定論的失敗が発生すると、グラフノードは失敗したハンドラを再試行し、時間をかけて後退させます。 -いくつかのケースでは、失敗はインデクサーによって解決できるかもしれません(例えば、エラーが正しい種類のプロバイダを持っていない結果である場合、必要なプロバイダを追加することでインデックス作成を継続することが可能になります)。しかし、サブグラフのコードを変更する必要がある場合もあります。 +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### ブロックキャッシュとコールキャッシュ -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. TX受信欠落イベントなど、ブロックキャッシュの不整合が疑われる場合。 @@ -304,7 +304,7 @@ TX受信欠落イベントなど、ブロックキャッシュの不整合が疑 #### 問題やエラーのクエリ -サブグラフがインデックス化されると、インデクサはサブグラフの専用クエリエントポイントを介してクエリを提供することが期待できます。もしインデクサがかなりの量のクエリを提供することを望むなら、専用のクエリノードを推奨します。また、クエリ量が非常に多い場合、インデクサーはレプリカシャードを構成して、クエリがインデックス作成プロセスに影響を与えないようにしたいと思うかもしれません。 +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. 
ただし、専用のクエリ ノードとレプリカを使用しても、特定のクエリの実行に時間がかかる場合があり、場合によってはメモリ使用量が増加し、他のユーザーのクエリ時間に悪影響を及ぼします。 @@ -316,7 +316,7 @@ Graph Node caches GraphQL queries by default, which can significantly reduce dat ##### クエリの分析 -問題のあるクエリが表面化するのは、ほとんどの場合、次の2つの方法のどちらかです。あるケースでは、ユーザー自身があるクエリが遅いと報告します。この場合、一般的な問題なのか、そのサブグラフやクエリに固有の問題なのか、遅さの理由を診断することが課題となります。そしてもちろん、可能であればそれを解決することです。 +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. また、クエリノードでメモリ使用量が多いことが引き金になる場合もあり、その場合は、まず問題の原因となっているクエリを特定することが課題となります。 @@ -336,10 +336,10 @@ In general, tables where the number of distinct entities are less than 1% of the Once a table has been determined to be account-like, running `graphman stats account-like .
` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### サブグラフの削除 +#### Removing Subgraphs > これは新しい機能で、Graph Node 0.29.xで利用可能になる予定です。 -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 7c543873cb81f8af9958b2654725b3a24730610b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:50 -0500 Subject: [PATCH 0104/1789] New translations graph-node.mdx (Korean) --- .../pages/ko/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/ko/indexing/tooling/graph-node.mdx b/website/src/pages/ko/indexing/tooling/graph-node.mdx index 0250f14a3d08..f5778789213d 100644 --- a/website/src/pages/ko/indexing/tooling/graph-node.mdx +++ b/website/src/pages/ko/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: Graph Node --- -Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. 
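As the next line notes, the indexer stack can run on bare metal or in a cloud environment, and the prebuilt Docker images are one of the quickest routes to a working setup. A hedged sketch of running the official image — the environment variable names are assumed from the docker-compose file shipped in the graph-node repository, and every value is a placeholder:

```bash
# Run the official image; env var names are assumed from the repo's docker-compose
# file, and all values are placeholders for your own Postgres, IPFS and RPC endpoints.
docker run -d --name graph-node \
  -p 8000:8000 -p 8001:8001 -p 8020:8020 -p 8030:8030 -p 8040:8040 \
  -e postgres_host=postgres.internal \
  -e postgres_user=graph-node \
  -e postgres_pass=let-me-in \
  -e postgres_db=graph-node \
  -e ipfs=ipfs.internal:5001 \
  -e ethereum=mainnet:https://eth-rpc.example.com \
  graphprotocol/graph-node:latest
```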
Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL database -The main store for the Graph Node, this is where subgraph data is stored, as well as metadata about subgraphs, and subgraph-agnostic network data such as the block cache, and eth_call cache. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### Network clients In order to index a network, Graph Node needs access to a network client via an EVM-compatible JSON-RPC API. This RPC may connect to a single client or it could be a more complex setup that load balances across multiple. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS Nodes -Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. ### Prometheus metrics server @@ -77,19 +77,19 @@ A complete Kubernetes example configuration can be found in the [indexer reposit When it is running Graph Node exposes the following ports: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## Advanced Graph Node configuration -At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the subgraphs to be indexed. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. @@ -114,13 +114,13 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https: #### Multiple Graph Nodes -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > Note that multiple Graph Nodes can all be configured to use the same database, which itself can be horizontally scaled via sharding. #### Deployment rules -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
Example deployment rule configuration: @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -167,11 +167,11 @@ Any node whose --node-id matches the regular expression will be set up to only r For most use cases, a single Postgres database is sufficient to support a graph-node instance. When a graph-node instance outgrows a single Postgres database, it is possible to split the storage of graph-node's data across multiple Postgres databases. All databases together form the store of the graph-node instance. Each individual database is called a shard. -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. Sharding becomes useful when your existing database can't keep up with the load that Graph Node puts on it, and when it's not possible to increase the database size anymore. -> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between subgraphs; in those situations it can help dramatically if the high-volume subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume subgraphs. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. In terms of configuring connections, start with max_connections in postgresql.conf set to 400 (or maybe even 200) and look at the store_connection_wait_time_ms and store_connection_checkout_count Prometheus metrics. Noticeable wait times (anything above 5ms) is an indication that there are too few connections available; high wait times there will also be caused by the database being very busy (like high CPU load). However if the database seems otherwise stable, high wait times indicate a need to increase the number of connections. In the configuration, how many connections each graph-node instance can use is an upper limit, and Graph Node will not keep connections open if it doesn't need them. 
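Deployment rules and shards together decide where a given deployment ends up, so once several index nodes and shards are in play it helps to confirm the placement after deploying. A sketch using `graphman` from inside the official container — the container name, config path, and deployment hash are placeholders, and the exact columns printed may differ between versions:

```bash
# Confirm which shard and index node a deployment was assigned to.
# Container name, config path and deployment hash are placeholders.
docker exec -it graph-node \
  graphman --config /etc/graph-node/config.toml info QmYourDeploymentHash
```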
@@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### Supporting multiple networks -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Multiple networks - Multiple providers per network (this can allow splitting of load across providers, and can also allow for configuration of full nodes as well as archive nodes, with Graph Node preferring cheaper providers if a given workload allows). @@ -225,11 +225,11 @@ Users who are operating a scaled indexing setup with advanced configuration may ### Managing Graph Node -Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing subgraphs. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### Logging -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). @@ -247,11 +247,11 @@ The graphman command is included in the official containers, and you can docker Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` -### Working with subgraphs +### Working with Subgraphs #### Indexing status API -Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different subgraphs, checking proofs of indexing, inspecting subgraph features and more. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). @@ -263,7 +263,7 @@ There are three separate parts of the indexing process: - Processing events in order with the appropriate handlers (this can involve calling the chain for state, and fetching data from the store) - Writing the resulting data to the store -These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where subgraphs are slow to index, the underlying cause will depend on the specific subgraph. +These stages are pipelined (i.e. 
they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. Common causes of indexing slowness: @@ -276,24 +276,24 @@ Common causes of indexing slowness: - The provider itself falling behind the chain head - Slowness in fetching new receipts at the chain head from the provider -Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### Failed subgraphs +#### Failed Subgraphs -During indexing subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Deterministic failures: these are failures which will not be resolved with retries - Non-deterministic failures: these might be down to issues with the provider, or some unexpected Graph Node error. When a non-deterministic failure occurs, Graph Node will retry the failing handlers, backing off over time. -In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Block and call cache -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node caches certain data in the store in order to save refetching from the provider. 
Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. If a block cache inconsistency is suspected, such as a tx receipt missing event: @@ -304,7 +304,7 @@ If a block cache inconsistency is suspected, such as a tx receipt missing event: #### Querying issues and errors -Once a subgraph has been indexed, indexers can expect to serve queries via the subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users. @@ -316,7 +316,7 @@ Graph Node caches GraphQL queries by default, which can significantly reduce dat ##### Analysing queries -Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that subgraph or query. And then of course to resolve it, if possible. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. In other cases, the trigger might be high memory usage on a query node, in which case the challenge is first to identify the query causing the issue. @@ -336,10 +336,10 @@ In general, tables where the number of distinct entities are less than 1% of the Once a table has been determined to be account-like, running `graphman stats account-like .
` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear <sgdNNN>.<table>
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### Removing subgraphs +#### Removing Subgraphs > This is new functionality, which will be available in Graph Node 0.29.x -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From dbeb1d2019b94cc94c11f596a5daeacc6139bc16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:51 -0500 Subject: [PATCH 0105/1789] New translations graph-node.mdx (Dutch) --- .../pages/nl/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/nl/indexing/tooling/graph-node.mdx b/website/src/pages/nl/indexing/tooling/graph-node.mdx index 0250f14a3d08..f5778789213d 100644 --- a/website/src/pages/nl/indexing/tooling/graph-node.mdx +++ b/website/src/pages/nl/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: Graph Node --- -Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. 
Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL database -The main store for the Graph Node, this is where subgraph data is stored, as well as metadata about subgraphs, and subgraph-agnostic network data such as the block cache, and eth_call cache. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### Network clients In order to index a network, Graph Node needs access to a network client via an EVM-compatible JSON-RPC API. This RPC may connect to a single client or it could be a more complex setup that load balances across multiple. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS Nodes -Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. ### Prometheus metrics server @@ -77,19 +77,19 @@ A complete Kubernetes example configuration can be found in the [indexer reposit When it is running Graph Node exposes the following ports: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## Advanced Graph Node configuration -At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the subgraphs to be indexed. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. @@ -114,13 +114,13 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https: #### Multiple Graph Nodes -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > Note that multiple Graph Nodes can all be configured to use the same database, which itself can be horizontally scaled via sharding. #### Deployment rules -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
Example deployment rule configuration: @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -167,11 +167,11 @@ Any node whose --node-id matches the regular expression will be set up to only r For most use cases, a single Postgres database is sufficient to support a graph-node instance. When a graph-node instance outgrows a single Postgres database, it is possible to split the storage of graph-node's data across multiple Postgres databases. All databases together form the store of the graph-node instance. Each individual database is called a shard. -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. Sharding becomes useful when your existing database can't keep up with the load that Graph Node puts on it, and when it's not possible to increase the database size anymore. -> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between subgraphs; in those situations it can help dramatically if the high-volume subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume subgraphs. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. In terms of configuring connections, start with max_connections in postgresql.conf set to 400 (or maybe even 200) and look at the store_connection_wait_time_ms and store_connection_checkout_count Prometheus metrics. Noticeable wait times (anything above 5ms) is an indication that there are too few connections available; high wait times there will also be caused by the database being very busy (like high CPU load). However if the database seems otherwise stable, high wait times indicate a need to increase the number of connections. In the configuration, how many connections each graph-node instance can use is an upper limit, and Graph Node will not keep connections open if it doesn't need them. 
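The paragraph above also mentions using replicas to spread query load across databases; a hedged sketch of how that might be declared in `config.toml` follows, where the replica name `repl1`, the hostnames, and the weights are assumptions for illustration only:

```toml
# Sketch of a primary shard with one read replica sharing query load.
# All connection details, weights and pool sizes are placeholders.
[store.primary]
connection = "postgresql://graph:password@primary-db:5432/graph-node"
weight = 1
pool_size = 10

[store.primary.replicas.repl1]
connection = "postgresql://graph:password@replica-db:5432/graph-node"
weight = 1
pool_size = 10
```

Under this kind of setup, queries would be split between the main database and `repl1` roughly in proportion to the configured weights.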
@@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### Supporting multiple networks -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Multiple networks - Multiple providers per network (this can allow splitting of load across providers, and can also allow for configuration of full nodes as well as archive nodes, with Graph Node preferring cheaper providers if a given workload allows). @@ -225,11 +225,11 @@ Users who are operating a scaled indexing setup with advanced configuration may ### Managing Graph Node -Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing subgraphs. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### Logging -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). @@ -247,11 +247,11 @@ The graphman command is included in the official containers, and you can docker Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` -### Working with subgraphs +### Working with Subgraphs #### Indexing status API -Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different subgraphs, checking proofs of indexing, inspecting subgraph features and more. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). @@ -263,7 +263,7 @@ There are three separate parts of the indexing process: - Processing events in order with the appropriate handlers (this can involve calling the chain for state, and fetching data from the store) - Writing the resulting data to the store -These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where subgraphs are slow to index, the underlying cause will depend on the specific subgraph. +These stages are pipelined (i.e. 
they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. Common causes of indexing slowness: @@ -276,24 +276,24 @@ Common causes of indexing slowness: - The provider itself falling behind the chain head - Slowness in fetching new receipts at the chain head from the provider -Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### Failed subgraphs +#### Failed Subgraphs -During indexing subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Deterministic failures: these are failures which will not be resolved with retries - Non-deterministic failures: these might be down to issues with the provider, or some unexpected Graph Node error. When a non-deterministic failure occurs, Graph Node will retry the failing handlers, backing off over time. -In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Block and call cache -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node caches certain data in the store in order to save refetching from the provider. 
Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. If a block cache inconsistency is suspected, such as a tx receipt missing event: @@ -304,7 +304,7 @@ If a block cache inconsistency is suspected, such as a tx receipt missing event: #### Querying issues and errors -Once a subgraph has been indexed, indexers can expect to serve queries via the subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users. @@ -316,7 +316,7 @@ Graph Node caches GraphQL queries by default, which can significantly reduce dat ##### Analysing queries -Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that subgraph or query. And then of course to resolve it, if possible. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. In other cases, the trigger might be high memory usage on a query node, in which case the challenge is first to identify the query causing the issue. @@ -336,10 +336,10 @@ In general, tables where the number of distinct entities are less than 1% of the Once a table has been determined to be account-like, running `graphman stats account-like .
` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear <sgdNNN>.<table>
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### Removing subgraphs +#### Removing Subgraphs > This is new functionality, which will be available in Graph Node 0.29.x -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 09750939b90e30703134a0ef80f13d01fdcf1284 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:52 -0500 Subject: [PATCH 0106/1789] New translations graph-node.mdx (Polish) --- .../pages/pl/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/pl/indexing/tooling/graph-node.mdx b/website/src/pages/pl/indexing/tooling/graph-node.mdx index 0250f14a3d08..f5778789213d 100644 --- a/website/src/pages/pl/indexing/tooling/graph-node.mdx +++ b/website/src/pages/pl/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: Graph Node --- -Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. 
Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL database -The main store for the Graph Node, this is where subgraph data is stored, as well as metadata about subgraphs, and subgraph-agnostic network data such as the block cache, and eth_call cache. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### Network clients In order to index a network, Graph Node needs access to a network client via an EVM-compatible JSON-RPC API. This RPC may connect to a single client or it could be a more complex setup that load balances across multiple. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS Nodes -Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. ### Prometheus metrics server @@ -77,19 +77,19 @@ A complete Kubernetes example configuration can be found in the [indexer reposit When it is running Graph Node exposes the following ports: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## Advanced Graph Node configuration -At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the subgraphs to be indexed. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. @@ -114,13 +114,13 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https: #### Multiple Graph Nodes -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > Note that multiple Graph Nodes can all be configured to use the same database, which itself can be horizontally scaled via sharding. #### Deployment rules -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
Example deployment rule configuration: @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -167,11 +167,11 @@ Any node whose --node-id matches the regular expression will be set up to only r For most use cases, a single Postgres database is sufficient to support a graph-node instance. When a graph-node instance outgrows a single Postgres database, it is possible to split the storage of graph-node's data across multiple Postgres databases. All databases together form the store of the graph-node instance. Each individual database is called a shard. -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. Sharding becomes useful when your existing database can't keep up with the load that Graph Node puts on it, and when it's not possible to increase the database size anymore. -> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between subgraphs; in those situations it can help dramatically if the high-volume subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume subgraphs. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. In terms of configuring connections, start with max_connections in postgresql.conf set to 400 (or maybe even 200) and look at the store_connection_wait_time_ms and store_connection_checkout_count Prometheus metrics. Noticeable wait times (anything above 5ms) is an indication that there are too few connections available; high wait times there will also be caused by the database being very busy (like high CPU load). However if the database seems otherwise stable, high wait times indicate a need to increase the number of connections. In the configuration, how many connections each graph-node instance can use is an upper limit, and Graph Node will not keep connections open if it doesn't need them. 
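Following the note above about keeping high-volume Subgraphs separate from everything else, one possible store layout is sketched below — `high_volume` is an arbitrary shard name and every connection detail is a placeholder, not a recommendation:

```toml
# Sketch: a dedicated shard for the busiest Subgraphs, so their data stays hot
# in that database's internal cache; all values are placeholders.
[store.primary]
connection = "postgresql://graph:password@primary-db:5432/graph-node"
pool_size = 10

[store.high_volume]
connection = "postgresql://graph:password@busy-db:5432/graph-node"
pool_size = 20
```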
@@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### Supporting multiple networks -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Multiple networks - Multiple providers per network (this can allow splitting of load across providers, and can also allow for configuration of full nodes as well as archive nodes, with Graph Node preferring cheaper providers if a given workload allows). @@ -225,11 +225,11 @@ Users who are operating a scaled indexing setup with advanced configuration may ### Managing Graph Node -Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing subgraphs. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### Logging -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). @@ -247,11 +247,11 @@ The graphman command is included in the official containers, and you can docker Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` -### Working with subgraphs +### Working with Subgraphs #### Indexing status API -Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different subgraphs, checking proofs of indexing, inspecting subgraph features and more. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). @@ -263,7 +263,7 @@ There are three separate parts of the indexing process: - Processing events in order with the appropriate handlers (this can involve calling the chain for state, and fetching data from the store) - Writing the resulting data to the store -These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where subgraphs are slow to index, the underlying cause will depend on the specific subgraph. +These stages are pipelined (i.e. 
they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. Common causes of indexing slowness: @@ -276,24 +276,24 @@ Common causes of indexing slowness: - The provider itself falling behind the chain head - Slowness in fetching new receipts at the chain head from the provider -Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### Failed subgraphs +#### Failed Subgraphs -During indexing subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Deterministic failures: these are failures which will not be resolved with retries - Non-deterministic failures: these might be down to issues with the provider, or some unexpected Graph Node error. When a non-deterministic failure occurs, Graph Node will retry the failing handlers, backing off over time. -In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Block and call cache -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node caches certain data in the store in order to save refetching from the provider. 
Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. If a block cache inconsistency is suspected, such as a tx receipt missing event: @@ -304,7 +304,7 @@ If a block cache inconsistency is suspected, such as a tx receipt missing event: #### Querying issues and errors -Once a subgraph has been indexed, indexers can expect to serve queries via the subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users. @@ -316,7 +316,7 @@ Graph Node caches GraphQL queries by default, which can significantly reduce dat ##### Analysing queries -Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that subgraph or query. And then of course to resolve it, if possible. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. In other cases, the trigger might be high memory usage on a query node, in which case the challenge is first to identify the query causing the issue. @@ -336,10 +336,10 @@ In general, tables where the number of distinct entities are less than 1% of the Once a table has been determined to be account-like, running `graphman stats account-like .
` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear <sgdNNN>.<table>
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### Removing subgraphs +#### Removing Subgraphs > This is new functionality, which will be available in Graph Node 0.29.x -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 2afd3751af4fe6fb033d859f05606c658697b94c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:53 -0500 Subject: [PATCH 0107/1789] New translations graph-node.mdx (Portuguese) --- .../pages/pt/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/pt/indexing/tooling/graph-node.mdx b/website/src/pages/pt/indexing/tooling/graph-node.mdx index 370538b94e34..5bfa2f07b8fa 100644 --- a/website/src/pages/pt/indexing/tooling/graph-node.mdx +++ b/website/src/pages/pt/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: Graph Node --- -O Node do The Graph (Graph Node) é o componente que indexa subgraphs e disponibiliza os dados resultantes a queries (consultas de dados) através de uma API GraphQL. Assim, ele é central ao stack dos indexers, e é crucial fazer operações corretas com um node Graph para executar um indexer com êxito. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. Isto fornece um resumo contextual do Graph Node e algumas das opções mais avançadas disponíveis para indexadores. Para mais instruções e documentação, veja o [repositório do Graph Node](https://github.com/graphprotocol/graph-node). ## Graph Node -O [Graph Node](https://github.com/graphprotocol/graph-node) é a implementação de referência para indexar Subgraphs na The Graph Network (rede do The Graph); fazer conexões com clientes de blockchain; indexar subgraphs; e disponibilizar dados indexados para queries. 
+[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. O Graph Node (e todo o stack dos indexadores) pode ser executado em um sistema bare-metal ou num ambiente na nuvem. Esta flexibilidade do componente central de indexing é importante para a robustez do Protocolo The Graph. Da mesma forma, um Graph Node pode ser [construído do código fonte](https://github.com/graphprotocol/graph-node) ou os indexadores podem usar uma das [imagens disponíveis no Docker](https://hub.docker.com/r/graphprotocol/graph-node). ### Banco de dados PostgreSQL -O armazenamento principal para o Graph Node. É aqui que são guardados dados de subgraph, assim como metadados sobre subgraphs e dados de rede agnósticos a subgraphs, como o cache de blocos e o cache eth_call. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### Clientes de rede Para indexar uma rede, o Graph Node precisa de acesso a um cliente de rede através de uma API JSON-RPC compatível com EVM. Esta RPC (chamada de processamento remoto) pode se conectar a um único cliente de Ethereum; ou o setup pode ser mais complexo, de modo a carregar saldos em múltiplos clientes. -Enquanto alguns subgraphs exigem apenas um node completo, alguns podem ter recursos de indexação que precisem de funções adicionais de RPC (chamadas de procedimento remoto). Especificamente, subgraphs que usam o `eth_calls` como parte da indexação exigirão um node de arquivo que apoie o [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898); e subgraphs com `callHandlers`, ou `blockHandlers` com um filtro `call`, exigem apoio ao `trace_filter` (veja a documentação sobre o trace module (módulo de rastreio) [aqui](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Firehoses de Rede** - um Firehose é um serviço de gRPC (chamadas de procedimento remoto - Google) que fornece uma transmissão ordenada — mas consciente de forks — de blocos, feito pelos programadores centrais do The Graph para permitir indexação em escala mais eficiente. Isto não é um requisito atual para Indexadores, mas é ideal que os mesmos experimentem a tecnologia, antes do apoio total à rede. Leia mais sobre o Firehose [aqui](https://firehose.streamingfast.io/). ### Nodes IPFS -Os metadados de lançamento de subgraph são armazenados na rede IPFS. O Graph Node acessa primariamente o node IPFS durante o lançamento do subgraph, para retirar o manifest e todos os arquivos ligados. Os indexadores de rede não precisam hospedar seu próprio node IPFS. Um node IPFS para a rede é hospedado em https://ipfs.network.thegraph.com. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. 
Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. ### Servidor de métricas Prometheus @@ -77,19 +77,19 @@ Veja um exemplo completo de configuração do Kubernetes no [repositório do ind Durante a execução, o Graph Node expõe as seguintes portas: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
<br />(for subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - |
| 8001 | GraphQL WS<br />(for subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - |
| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
<br />(for Subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - |
+| 8001 | GraphQL WS<br />(for Subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - |
+| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Importante**: Cuidado ao expor portas publicamente — **portas de administração** devem ser trancadas a sete chaves. Isto inclui o endpoint JSON-RPC do Graph Node. ## Configurações avançadas do Graph Node -Basicamente, o Graph Node pode ser operado com uma única instância de Graph Node, um único banco de dados PostgreSQP, e os clientes de rede como exigidos pelos subgraphs a serem indexados. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. Este setup pode ser escalado horizontalmente, com a adição de vários Graph Nodes e bancos de dados para apoiá-los. Utilizadores mais avançados podem tomar vantagem de algumas das capacidades de escala horizontal do Graph Node, assim como algumas das opções de configuração mais avançadas, através do arquivo `config.toml` e as variáveis de ambiente do Graph Node. @@ -114,13 +114,13 @@ A documentação completa do `config.toml` pode ser encontrada nos [documentos d #### Múltiplos Graph Nodes -A indexação de Graph Nodes pode ser escalada horizontalmente, com a execução de várias instâncias de Graph Node para separar indexação de queries em nodes diferentes. Isto é possível só com a execução de Graph Nodes, configurados com um `node_id` diferente na inicialização (por ex. no arquivo Docker Compose), que pode então ser usado no arquivo `config.toml` para especificar [nodes dedicados de query](#dedicated-query-nodes), [ingestores de blocos](#dedicated-block-ingestion") e separar subgraphs entre nódulos com [regras de lançamento](#deployment-rules). +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > Note que vários Graph Nodes podem ser configurados para usar o mesmo banco de dados — que, por conta própria, pode ser escalado horizontalmente através do sharding. #### Regras de lançamento -Levando em conta vários Graph Nodes, é necessário gerir o lançamento de novos subgraphs para que o mesmo subgraph não seja indexado por dois nodes diferentes, o que levaria a colisões. Isto é possível regras de lançamento, que também podem especificar em qual `shard` os dados de um subgraph devem ser armazenados, caso seja usado o sharding de bancos de dados. As regras de lançamento podem combinar com o nome do subgraph e com a rede que o lançamento indexa para fazer uma decisão. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
Exemplo de configuração de regra de lançamento: @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -167,11 +167,11 @@ Qualquer node cujo --node-id combina com a expressão regular será programado p Para a maioria dos casos de uso, um único banco de dados Postgres é suficiente para apoiar uma instância de graph-node. Quando uma instância de graph-node cresce mais que um único banco Postgres, é possível dividir o armazenamento dos dados do graph-node entre múltiplos bancos Postgres. Todos os bancos de dados, juntos, formam o armazenamento da instância do graph-node. Cada banco de dados individual é chamado de shard. -Os shards servem para dividir lançamentos de subgraph em múltiplos bancos de dados, e podem também ser configurados para usar réplicas a fim de dividir a carga de query entre bancos de dados. Isto inclui a configuração do número de conexões disponíveis do banco que cada `graph-node` deve manter em seu pool de conexão para cada banco, o que fica cada vez mais importante conforme são indexados mais subgraphs. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. O sharding torna-se útil quando o seu banco de dados existente não aguenta o peso do Graph Node, e quando não é mais possível aumentar o tamanho do banco. -> Geralmente, antes de começar com shards, é melhor maximizar o tamanho de um único banco de dados. Uma exceção é onde o tráfego de queries é dividido de maneira muito desigual entre subgraphs; nestes casos, pode ser de bom valor que os subgraphs de alto volume sejam mantidos em um shard e todo o resto em outro, porque aquele setup aumenta muito a chance dos dados para os subgraphs de alto volume permanecerem no cache db-internal e não serem substituídos por dados de menor prioridade nos subgraphs de baixo volume. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. Em termos de configuração de conexões, comece com o max_connections no postgresql.conf configurado em 400 (ou talvez até 200) e preste atenção nas métricas do Prometheus store_connection_wait_time_ms e store_connection_checkout_count. Tempos de espera óbvios (acima de 5ms) indicam que há poucas conexões disponíveis; também podem ser causados por atividade excessiva no banco de dados (como uso alto de CPU). Mas caso o banco de dados pareça estável fora isto, os tempos de espera longos indicam uma necessidade de aumento no número de conexões. 
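As a quick sanity check on the connection guidance above, the relevant settings can be inspected directly in Postgres. This is only a sketch: it assumes shell access to the database host, that `psql` is available, and that the graph-node database is named `graph-node`; adjust the connection details to your own setup.

```sh
# Show the connection limit currently configured in postgresql.conf
psql -d graph-node -c "SHOW max_connections;"

# Count the connections currently open, to compare against that limit
psql -d graph-node -c "SELECT count(*) FROM pg_stat_activity;"
```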
Na configuração, há um limite máximo de conexões que cada instância graph-node pode usar, e o Graph Node não manterá conexões abertas caso não sejam necessárias. @@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### Apoio a múltiplas redes -O Graph Protocol só aumenta o número de redes, com apoio a recompensas de indexação, e existem muitos subgraphs a indexarem redes não apoiadas que um indexador gostaria de processar. O arquivo config.toml permite a configuração expressiva e flexível de: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Múltiplas redes - Múltiplos provedores por rede (isto pode permitir a separação de peso entre eles, e pode permitir a configuração de nodes completos além de nodes de arquivo; o Graph Node prefere provedores mais baratos, caso permita uma carga de trabalho). @@ -225,11 +225,11 @@ Os utilizadores a operar um setup de indexing escalado, com configurações avan ### Como gerir o Graph Node -Dado um Graph Node (ou Nodes!) em execução, o desafio torna-se gerir subgraphs lançados entre estes nodes. O Graph Node tem uma gama de ferramentas para ajudar a direção de subgraphs. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### Logging -Os logs do Graph Node podem fornecer informações úteis, para debug e otimização — do Graph Node e de subgraphs específicos. O Graph Node apoia níveis diferentes de logs através da variável de ambiente `GRAPH_LOG`, com os seguintes níveis: `error`, `warn`, `info`, `debug` ou `trace`. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. Além disto, configurar o `GRAPH_LOG_QUERY_TIMING` para `gql` fornece mais detalhes sobre o processo de queries no GraphQL (porém, isto criará um grande volume de logs). @@ -247,11 +247,11 @@ O comando `graphman` é incluído nos containers oficiais, e pode ser executado A documentação completa dos comandos do `graphman` está no repositório do Graph Node. Veja o [/docs/graphman.md](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) no `/docs` do Graph Node -### Como trabalhar com subgraphs +### Working with Subgraphs #### API de estado de indexação -Inicialmente disponível na porta 8030/graphql, a API de estado de indexação expõe uma gama de métodos para conferir o estado da indexação para subgraphs diferentes, conferir provas de indexação, inspecionar características de subgraphs, e mais. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. Veja o schema completo [aqui](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). 
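For illustration, the indexing status API mentioned above can be exercised with an ordinary GraphQL request. The sketch below assumes a Graph Node running locally with the default index-node port; the exact fields available are defined by the schema linked above, so treat this selection as an example rather than a definitive reference.

```sh
# Ask the indexing status API (port 8030 by default) for the status of all deployments
curl -s -X POST http://localhost:8030/graphql \
  -H 'Content-Type: application/json' \
  -d '{"query":"{ indexingStatuses { subgraph synced health chains { network chainHeadBlock { number } latestBlock { number } } } }"}'
```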
@@ -263,7 +263,7 @@ Há três partes separadas no processo de indexação: - Processar eventos conforme os handlers apropriados (isto pode envolver chamar a chain para o estado, e retirar dados do armazenamento) - Escrever os dados resultantes ao armazenamento -Estes estágios são segmentados (por ex., podem ser executados em paralelo), mas são dependentes um no outro. Quando há demora em indexar, a causa depende do subgraph específico. +These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. Causas comuns de lentidão na indexação: @@ -276,24 +276,24 @@ Causas comuns de lentidão na indexação: - Atraso do próprio provedor em relação ao topo da chain - Atraso em retirar novos recibos do topo da chain do provedor -As métricas de indexação de subgraph podem ajudar a diagnosticar a causa raiz do atraso na indexação. Em alguns casos, o problema está no próprio subgraph, mas em outros, melhorar provedores de rede, reduzir a contenção no banco de dados, e outras melhorias na configuração podem aprimorar muito o desempenho da indexação. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### Subgraphs falhos +#### Failed Subgraphs -É possível que subgraphs falhem durante a indexação, caso encontrem dados inesperados; algum componente não funcione como o esperado; ou se houver algum bug nos handlers de eventos ou na configuração. Geralmente, há dois tipos de falha: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Falhas determinísticas: Falhas que não podem ser resolvidas com outras tentativas - Falhas não determinísticas: podem ser resumidas em problemas com o provedor ou algum erro inesperado no Graph Node. Quando ocorrer uma falha não determinística, o Graph Node reiniciará os handlers falhos e recuará gradualmente. -Em alguns casos, uma falha pode ser resolvida pelo indexador (por ex. a indexação falhou por ter o tipo errado de provedor, e necessita do correto para continuar). Porém, em outros, é necessária uma alteração no código do subgraph. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Falhas determinísticas são consideradas "finais", com uma Prova de Indexação (POI) gerada para o bloco falho; falhas não determinísticas não são finais, como há chances do subgraph superar a falha e continuar a indexar. Às vezes, o rótulo de "não determinístico" é incorreto e o subgraph não tem como melhorar do erro; estas falhas devem ser relatadas como problemas no repositório do Graph Node. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. 
In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Cache de blocos e chamadas -O Graph Node cacheia certos dados no armazenamento para poupar um refetching do provedor. São cacheados os blocos e os resultados do `eth_calls` (este último, cacheado a partir de um bloco específico). Este caching pode aumentar dramaticamente a velocidade de indexação durante a "ressincronização" de um subgraph levemente alterado. +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -Porém, em algumas instâncias, se um node Ethereum tiver fornecido dados incorretos em algum período, isto pode entrar no cache, o que causa dados incorretos ou subgraphs falhos. Neste caso, os indexadores podem usar o `graphman` para limpar o cache envenenado e rebobinar os subgraphs afetados, que retirarão dados frescos do provedor (idealmente) saudável. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. Caso haja uma suspeita de inconsistência no cache de blocos, como a falta de um evento tx receipt missing: @@ -304,7 +304,7 @@ Caso haja uma suspeita de inconsistência no cache de blocos, como a falta de um #### Erros e problemas de query -Quando um subgraph for indexado, os indexadores podem esperar servir consultas através do endpoint dedicado de consultas do subgraph. Se o indexador espera servir volumes significantes de consultas, é recomendado um node dedicado a queries; e para volumes muito altos, podem querer configurar réplicas de shard para que os queries não impactem o processo de indexação. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. Porém, mesmo com um node dedicado a consultas e réplicas deste, certos queries podem demorar muito para executar; em alguns casos, aumentam o uso da memória e pioram o tempo de query para outros utilizadores. @@ -316,7 +316,7 @@ O Graph Node naturalmente cacheia queries no GraphQL, o que pode reduzir muito a ##### Análise de queries -Queries problemáticos tendem a surgir em uma de duas maneiras. Em alguns casos, os próprios utilizadores relatam que um certo query está lento; neste caso, o desafio é diagnosticar a razão para a lentidão, seja um problema geral ou específico àquele subgraph ou query. E depois, claro, resolvê-lo se possível. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. 
Em outros casos, o gatilho pode ser o excesso de uso de memória em um node de query; no caso, o primeiro desafio é identificar a consulta que causou este problema. @@ -336,10 +336,10 @@ Em geral, tábuas em que o número de entidades distintas é menos de 1% do tota Quando uma tábua for determinada como "tipo-conta", executar o `graphman stats account-like .
<sgdNNN>.<table>` ativará a otimização tipo-conta para queries frente àquela tábua. A otimização pode ser desativada novamente com `graphman stats account-like --clear <sgdNNN>.<table>
`. Os nodes de consulta levam até 5 minutos para perceber que a otimização foi ligada ou desligada. Após ativar a otimização, verifique se a mudança não desacelera os queries para aquela tábua. Caso tenha configurado o Grafana para monitorar o Postgres, muitos queries lentos podem aparecer no `pg_stat_activity`, com demora de vários segundos. Neste caso, a otimização precisa ser desativada novamente. -Para subgraphs parecidos com o Uniswap, as tábuas `pair` e `token` são ótimas para esta otimização, e podem ter efeitos surpreendentes na carga do banco de dados. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### Como remover subgraphs +#### Removing Subgraphs > Esta é uma funcionalidade nova, que estará disponível no Graph Node 0.29.x -Em certo ponto, o indexador pode querer remover um subgraph. É só usar o `graphman drop`, que apaga um lançamento e todos os seus dados indexados. O lançamento pode ser especificado como o nome de um subgraph, um hash IPFS `Qm..`, ou o namespace de banco de dados `sgdNNN`. Mais documentos sobre o processo [aqui](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 7e0bd5dc64ab9a5bb69f2579989463e51cbec171 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:55 -0500 Subject: [PATCH 0108/1789] New translations graph-node.mdx (Russian) --- .../pages/ru/indexing/tooling/graph-node.mdx | 146 +++++++++--------- 1 file changed, 73 insertions(+), 73 deletions(-) diff --git a/website/src/pages/ru/indexing/tooling/graph-node.mdx b/website/src/pages/ru/indexing/tooling/graph-node.mdx index 43e98a3aad17..6e570d16d56a 100644 --- a/website/src/pages/ru/indexing/tooling/graph-node.mdx +++ b/website/src/pages/ru/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: Graph Node --- -Graph Node — это компонент, который индексирует подграфы и делает полученные данные доступными для запроса через GraphQL API. Таким образом, он занимает центральное место в стеке индексатора, а правильная работа Graph Node имеет решающее значение для успешного запуска индексатора. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. -This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). +Здесь представлен контекстуальный обзор Graph Node и некоторые более продвинутые параметры, доступные индексаторам. Подробную документацию и инструкции можно найти в [репозитории Graph Node](https://github.com/graphprotocol/graph-node). 
## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. -Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). +Graph Node (и весь стек Индексаторов) можно запускать на «голом железе» или в облачной среде. Эта гибкость центрального компонента индексирования имеет решающее значение для надежности The Graph Protocol. Точно так же Graph Node может быть [создана из исходного кода](https://github.com/graphprotocol/graph-node), или Индексаторы могут использовать один из [предусмотренных Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### База данных PostgreSQL -Основное хранилище для Graph Node, это место, где хранятся данные подграфа, а также метаданные о подграфах и сетевые данные, не зависящие от подграфа, такие как кэш блоков и кэш eth_call. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### Клиенты сети Для индексации сети Graph Node требуется доступ к сетевому клиенту через EVM-совместимый JSON-RPC API. Этот RPC может подключаться к одному клиенту или может представлять собой более сложную настройку, которая распределяет нагрузку между несколькими. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). +**Network Firehoses**. 
Firehose — это служба gRPC, предоставляющая упорядоченный, но учитывающий форк поток блоков, разработанная разработчиками ядра The Graph для лучшей поддержки крупномасштабного высокопроизводительного индексирования. В настоящее время это не является обязательным требованием для Индексаторов, но Индексаторам рекомендуется ознакомиться с технологией до начала полной поддержки сети. Подробнее о Firehose можно узнать [здесь(https://firehose.streamingfast.io/). ### Ноды IPFS -Метаданные о развертывании подграфа хранятся в сети IPFS. The Graph Node в первую очередь обращается к ноде IPFS во время развертывания подграфа, чтобы получить манифест подграфа и все связанные файлы. Сетевым индексаторам не требуется запускать собственную ноду IPFS. Нода IPFS для сети находиться по адресу https://ipfs.network.thegraph.com. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. ### Сервер метрик Prometheus @@ -42,7 +42,7 @@ While some subgraphs may just require a full node, some may have indexing featur - **IPFS** -- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. +- **Дополнительные требования для пользователей Ubuntu**. Для запуска Graph Node на Ubuntu может потребоваться несколько дополнительных пакетов. ```sh sudo apt-get install -y clang libpq-dev libssl-dev pkg-config @@ -58,7 +58,7 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` +2. Клонируйте репозиторий [Graph Node](https://github.com/graphprotocol/graph-node) и соберите исходный код, запустив `cargo build` 3. Now that all the dependencies are setup, start the Graph Node: @@ -71,35 +71,35 @@ cargo run -p graph-node --release -- \ ### Начало работы с Kubernetes -A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). +Полный пример конфигурации Kubernetes можно найти в [репозитории индексатора](https://github.com/graphprotocol/indexer/tree/main/k8s). ### Порты Во время работы Graph Node предоставляет следующие порты: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
<br />(for subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - |
| 8001 | GraphQL WS<br />(for subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - |
| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
<br />(for Subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - |
+| 8001 | GraphQL WS<br />(for Subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - |
+| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. +> **Важно**. Будьте осторожны, открывая порты для общего доступа — **порты администрирования** должны оставаться закрытыми. Это касается конечных точек Graph Node JSON-RPC. ## Расширенная настройка Graph Node -На простейшем уровне Graph Node может работать с одним экземпляром Graph Node, одной базой данных PostgreSQL, нодой IPFS и сетевыми клиентами в соответствии с требованиями субграфов, подлежащих индексированию. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. -This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. +Эту настройку можно масштабировать горизонтально, добавляя несколько Graph Node и несколько баз данных для поддержки этих Graph Node. Опытные пользователи могут воспользоваться некоторыми возможностями горизонтального масштабирования Graph Node, а также некоторыми более продвинутыми параметрами конфигурации через файл `config.toml`l и переменные среды Graph Node. ### `config.toml` -A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch. +Файл конфигурации [TOML](https://toml.io/en/) можно использовать для установки более сложных конфигураций, чем те, которые представлены в интерфейсе командной строки. Местоположение файла передается с помощью параметра командной строки --config. > При использовании файла конфигурации невозможно использовать параметры --postgres-url, --postgres-secondary-hosts и --postgres-host-weights. -A minimal `config.toml` file can be provided; the following file is equivalent to using the --postgres-url command line option: +Можно предоставить минимальный файл `config.toml`, следующий файл эквивалентен использованию опции командной строки --postgres-url: ```toml [store] @@ -110,17 +110,17 @@ connection="<.. postgres-url argument ..>" indexers = [ "<.. list of all indexing nodes ..>" ] ``` -Full documentation of `config.toml` can be found in the [Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). +Полную документацию по `config.toml` можно найти в [документации Graph Node](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). #### Множественные Graph Node -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). 
+Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > Обратите внимание, что несколько Graph Nodes могут быть настроены для использования одной и той же базы данных, которая сама по себе может масштабироваться по горизонтали с помощью сегментирования. #### Правила развертывания -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. Пример настройки правил развертывания: @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -150,7 +150,7 @@ indexers = [ ] ``` -Read more about deployment rules [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). +Подробную информацию о правилах развертывания можно найти [здесь](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). #### Выделенные ноды запросов @@ -167,19 +167,19 @@ query = "" В большинстве случаев одной базы данных Postgres достаточно для поддержки отдельной Graph Node. Когда отдельная Graph Node перерастает одну базу данных Postgres, можно разделить хранилище данных Graph Node между несколькими базами данных Postgres. Все базы данных вместе образуют хранилище отдельной Graph Node. Каждая отдельная база данных называется шардом (сегментом). -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. 
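One lightweight way to watch the connection-pool behaviour described above is to read Graph Node's own metrics endpoint rather than the database. A minimal sketch, assuming a node running locally with the default metrics port and using the `store_connection_*` series discussed in this section:

```sh
# Graph Node serves Prometheus metrics on port 8040 under /metrics by default;
# filter for the connection-pool series to spot wait times and checkout counts
curl -s http://localhost:8040/metrics \
  | grep -E 'store_connection_wait_time_ms|store_connection_checkout_count'
```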
Сегментирование становится полезным, когда Ваша существующая база данных не может справиться с нагрузкой, которую на нее возлагает Graph Node, и когда больше невозможно увеличить размер базы данных. -> Обычно лучше сделать одну базу данных максимально большой, прежде чем начинать с шардов (сегментов). Единственным исключением является случай, когда трафик запросов распределяется между подграфами очень неравномерно; в таких ситуациях может существенно помочь, если подграфы большого объема хранятся в одном сегменте, а все остальное — в другом, потому что такая настройка повышает вероятность того, что данные для подграфов большого объема останутся во внутреннем кеше базы данных и не будут заменяться данными, которые не очень нужны, из подграфов с небольшим объемом. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. Что касается настройки соединений, начните с max_connections в postgresql.conf, установленного на 400 (или, может быть, даже на 200), и посмотрите на метрики store_connection_wait_time_ms и store_connection_checkout_count Prometheus. Длительное время ожидания (все, что превышает 5 мс) является признаком того, что доступных соединений слишком мало; большое время ожидания также будет вызвано тем, что база данных очень загружена (например, высокая загрузка ЦП). Однако, если в остальном база данных кажется стабильной, большое время ожидания указывает на необходимость увеличения количества подключений. В конфигурации количество подключений, которое может использовать каждая отдельная Graph Node, является верхним пределом, и Graph Node не будет держать соединения открытыми, если они ей не нужны. -Read more about store configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). +Подробную информацию о настройке хранилища можно найти [здесь](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). #### Прием выделенного блока -If there are multiple nodes configured, it will be necessary to specify one node which is responsible for ingestion of new blocks, so that all configured index nodes aren't polling the chain head. This is done as part of the `chains` namespace, specifying the `node_id` to be used for block ingestion: +Если настроено несколько нод, необходимо выделить одну, которая будет отвечать за прием новых блоков, чтобы все сконфигурированные ноды индекса не опрашивали заголовок чейна. Это настраивается в рамках пространства имен `chains`, в котором `node_id`, используемый для приема блоков: ```toml [chains] @@ -188,13 +188,13 @@ ingestor = "block_ingestor_node" #### Поддержка нескольких сетей -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. 
The `config.toml` file allows for expressive and flexible configuration of: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Несколько сетей - Несколько провайдеров на сеть (это может позволить разделить нагрузку между провайдерами, а также может позволить настроить полные ноды, а также архивные ноды, при этом Graph Node предпочитает более дешевых поставщиков, если позволяет данная рабочая нагрузка). - Дополнительные сведения о провайдере, такие как функции, аутентификация и тип провайдера (для экспериментальной поддержки Firehose) -The `[chains]` section controls the ethereum providers that graph-node connects to, and where blocks and other metadata for each chain are stored. The following example configures two chains, mainnet and kovan, where blocks for mainnet are stored in the vip shard and blocks for kovan are stored in the primary shard. The mainnet chain can use two different providers, whereas kovan only has one provider. +Раздел `[chains]` управляет провайдерами Ethereum, к которым подключается graph-node, и где хранятся блоки и другие метаданные для каждого чейна. В следующем примере настраиваются два чейна, mainnet и kovan, где блоки для mainnet хранятся в сегменте vip, а блоки для kovan — в основном сегменте. Чейн mainnet может использовать двух разных провайдеров, тогда как у kovan есть только один провайдер. ```toml [chains] @@ -210,50 +210,50 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -Read more about provider configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). +Подробную информацию о настройке провайдера можно найти [здесь](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). ### Переменные среды -Graph Node supports a range of environment variables which can enable features, or change Graph Node behaviour. These are documented [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). +Graph Node поддерживает ряд переменных среды, которые могут включать функции или изменять поведение Graph Node. Они описаны [здесь] (https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). ### Непрерывное развертывание Пользователи, использующие масштабируемую настройку индексирования с расширенной конфигурацией, могут получить преимущество от управления своими узлами Graph с помощью Kubernetes. -- The indexer repository has an [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) -- [Launchpad](https://docs.graphops.xyz/launchpad/intro) is a toolkit for running a Graph Protocol Indexer on Kubernetes maintained by GraphOps. It provides a set of Helm charts and a CLI to manage a Graph Node deployment. +- В репозитории индексатора имеется [пример ссылки на Kubernetes](https://github.com/graphprotocol/indexer/tree/main/k8s) +- [Launchpad](https://docs.graphops.xyz/launchpad/intro) – это набор инструментов для запуска Индексатора Graph Protocol в Kubernetes, поддерживаемый GraphOps. Он предоставляет набор диаграмм Helm и интерфейс командной строки для управления развертыванием Graph Node. 
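Pulling the options in this section together, a minimal sketch of starting a single node against a `config.toml` could look like the following. The `--config` and `--node-id` flags and the `GRAPH_LOG` variable are the ones referenced on this page; the file path, node id and IPFS address are placeholders to be replaced with values from your own deployment.

```sh
# Start one Graph Node instance from a TOML configuration file.
# The node id should match one of the indexer node ids referenced in config.toml.
export GRAPH_LOG=info
graph-node \
  --config /etc/graph-node/config.toml \
  --node-id index_node_vip_0 \
  --ipfs 127.0.0.1:5001
```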
### Управление Graph Node -При наличии работающей Graph Node (или Graph Nodes!), задача состоит в том, чтобы управлять развернутыми подграфами на этих нодах. Graph Node предлагает ряд инструментов для управления подграфами. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### Логирование (ведение журналов) -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. -In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). +Кроме того, установка для GRAPH_LOG_QUERY_TIMING`значения`gql\` предоставляет дополнительные сведения о том, как выполняются запросы GraphQL (хотя это приводит к созданию большого объема логов). -#### Monitoring & alerting +#### Мониторинг и оповещения Graph Node предоставляет метрики через конечную точку Prometheus на порту 8040 по умолчанию. Затем можно использовать Grafana для визуализации этих метрик. -The indexer repository provides an [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). +В репозитории индексатора имеется [пример конфигурации Grafana] (https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). #### Graphman -`graphman` is a maintenance tool for Graph Node, helping with diagnosis and resolution of different day-to-day and exceptional tasks. +`graphman` – это инструмент обслуживания Graph Node, помогающий диагностировать и решать различные повседневные и исключительные задачи. -The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. +Команда graphman включена в официальные контейнеры, и Вы можете выполнить docker exec в контейнере graph-node, чтобы запустить ее. Для этого требуется файл `config.toml`. -Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` +Полная документация по командам `graphman` доступна в репозитории Graph Node. См. [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) в Graph Node `/docs` -### Работа с подграфами +### Working with Subgraphs #### API статуса индексирования -Доступный по умолчанию на порту 8030/graphql, API статуса индексирования предоставляет ряд методов для проверки статуса индексирования для различных подграфов, проверки доказательств индексирования, проверки функций подграфов и многого другого. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. -The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). 
+Полная схема доступна [здесь](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). #### Производительность индексирования @@ -263,12 +263,12 @@ The full schema is available [here](https://github.com/graphprotocol/graph-node/ - Обработка событий по порядку с помощью соответствующих обработчиков (это может включать вызов чейна для состояния и выборку данных из хранилища) - Запись полученных данных в хранилище -Эти этапы конвейерные (т.е. могут выполняться параллельно), но они зависят друг от друга. Там, где подграфы индексируются медленно, основная причина будет зависеть от конкретного подграфа. +These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. Распространенные причины низкой скорости индексации: -- Time taken to find relevant events from the chain (call handlers in particular can be slow, given the reliance on `trace_filter`) -- Making large numbers of `eth_calls` as part of handlers +- Время, затрачиваемое на поиск соответствующих событий в чейне (в частности, обработчики вызовов могут работать медленно, учитывая зависимость от `trace_filter`) +- Создание большого количества `eth_calls` в составе обработчиков - Большое количество операций с хранилищем во время выполнения - Большой объем данных для сохранения в хранилище - Большое количество событий для обработки @@ -276,35 +276,35 @@ The full schema is available [here](https://github.com/graphprotocol/graph-node/ - Сам провайдер отстает от головного чейна - Задержка получения новых поступлений от провайдера в головном чейне -Метрики индексации подграфов могут помочь диагностировать основную причину замедления индексации. В некоторых случаях проблема связана с самим подграфом, но в других случаях усовершенствованные сетевые провайдеры, снижение конкуренции за базу данных и другие улучшения конфигурации могут заметно повысить производительность индексирования. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### Повреждённые подграфы +#### Failed Subgraphs -Во время индексации подграфов может произойти сбой, если они столкнутся с неожиданными данными, какой-то компонент не будет работать должным образом или если в обработчиках событий или конфигурации появится ошибка. Есть два основных типа отказа: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Детерминированные сбои: это сбои, которые не будут устранены при повторных попытках - Недетерминированные сбои: они могут быть связаны с проблемами с провайдером или какой-либо неожиданной ошибкой Graph Node. Когда происходит недетерминированный сбой, Graph Node повторяет попытки обработчиков сбоя, со временем отказываясь от них. -В некоторых случаях сбой может быть устранен индексатором (например, если ошибка вызвана отсутствием нужного поставщика, добавление необходимого поставщика позволит продолжить индексирование). Однако в других случаях требуется изменить код подграфа. 
+In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Кэш блокировки и вызова -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. Если есть подозрение на несогласованность кэша блоков, например, событие отсутствия квитанции tx: -1. `graphman chain list` to find the chain name. -2. `graphman chain check-blocks by-number ` will check if the cached block matches the provider, and deletes the block from the cache if it doesn’t. - 1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate `. +1. `graphman chain list`, чтобы найти название чейна. +2. `graphman chain check-blocks by-number ` проверит, соответствует ли кэшированный блок провайдеру, и удалит блок из кэша, если это не так. + 1. Если есть разница, может быть безопаснее усечь весь кеш с помощью `graphman chain truncate `. 2. Если блок соответствует провайдеру, то проблема может быть отлажена непосредственно провайдером. #### Запрос проблем и ошибок -После индексации подграфа индексаторы могут рассчитывать на обслуживание запросов через выделенную конечную точку запроса подграфа. 
Если индексатор планирует обслуживать значительный объем запросов, рекомендуется выделенная нода запросов, а в случае очень больших объемов запросов индексаторы могут настроить сегменты копий так, чтобы запросы не влияли на процесс индексирования.
+Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process.

 Однако, даже с выделенной нодой запросов и копиями выполнение некоторых запросов может занять много времени, а в некоторых случаях увеличить использование памяти и негативно повлиять на время выполнения запросов другими пользователями.

@@ -312,15 +312,15 @@ However, in some instances, if an Ethereum node has provided incorrect data for

 ##### Кэширование запросов

-Graph Node caches GraphQL queries by default, which can significantly reduce database load. This can be further configured with the `GRAPH_QUERY_CACHE_BLOCKS` and `GRAPH_QUERY_CACHE_MAX_MEM` settings - read more [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching).
+Graph Node по умолчанию кэширует запросы GraphQL, что может значительно снизить нагрузку на базу данных. Это можно дополнительно настроить с помощью параметров `GRAPH_QUERY_CACHE_BLOCKS` и `GRAPH_QUERY_CACHE_MAX_MEM` — подробнее читайте [здесь](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching).

 ##### Анализ запросов

-Проблемные запросы чаще всего выявляются одним из двух способов. В некоторых случаях пользователи сами сообщают, что данный запрос выполняется медленно. В этом случае задача состоит в том, чтобы диагностировать причину замедленности — является ли это общей проблемой или специфичной для этого подграфа или запроса. А затем, конечно же, решить ее, если это возможно.
+Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible.

 В других случаях триггером может быть высокий уровень использования памяти на ноде запроса, и в этом случае сначала нужно определить запрос, вызвавший проблему.

-Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and summarize Graph Node's query logs. `GRAPH_LOG_QUERY_TIMING` can also be enabled to help identify and debug slow queries.
+Индексаторы могут использовать [qlog](https://github.com/graphprotocol/qlog/) для обработки и суммирования логов запросов Graph Node. Также можно включить `GRAPH_LOG_QUERY_TIMING` для выявления и отладки медленных запросов.

 При медленном запросе у индексаторов есть несколько вариантов. Разумеется, они могут изменить свою модель затрат, чтобы значительно увеличить стоимость отправки проблемного запроса. Это может привести к снижению частоты этого запроса. Однако это часто не устраняет основной причины проблемы.

@@ -328,18 +328,18 @@ Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and s

 Таблицы базы данных, в которых хранятся объекты, как правило, бывают двух видов: «подобные транзакциям», когда объекты, однажды созданные, никогда не обновляются, т. е. они хранят что-то вроде списка финансовых транзакций и «подобные учетной записи», где объекты обновляются очень часто, т. е. они хранят что-то вроде финансовых счетов, которые изменяются каждый раз при записи транзакции. Таблицы, подобные учетным записям, характеризуются тем, что они содержат большое количество версий объектов, но относительно мало отдельных объектов. Часто в таких таблицах количество отдельных объектов составляет 1% от общего количества строк (версий объектов)

-For account-like tables, `graph-node` can generate queries that take advantage of details of how Postgres ends up storing data with such a high rate of change, namely that all of the versions for recent blocks are in a small subsection of the overall storage for such a table.
+Для таблиц, подобных учетным записям, `graph-node` может генерировать запросы, в которых используются детали того, как Postgres в конечном итоге сохраняет данные с такой высокой скоростью изменения, а именно, что все версии последних блоков находятся в небольшом подразделе общего хранилища для такой таблицы.

-The command `graphman stats show` shows, for each entity type/table in a deployment, how many distinct entities, and how many entity versions each table contains. That data is based on Postgres-internal estimates, and is therefore necessarily imprecise, and can be off by an order of magnitude. A `-1` in the `entities` column means that Postgres believes that all rows contain a distinct entity.
+Команда `graphman stats show ` показывает для каждого типа/таблицы объектов в развертывании, сколько различных объектов и сколько версий объектов содержит каждая таблица. Эти данные основаны на внутренних оценках Postgres и, следовательно, неточны и могут отличаться на порядок. `-1` в столбце `entities` означает, что Postgres считает, что все строки содержат отдельный объект.

-In general, tables where the number of distinct entities are less than 1% of the total number of rows/entity versions are good candidates for the account-like optimization. When the output of `graphman stats show` indicates that a table might benefit from this optimization, running `graphman stats show` will perform a full count of the table - that can be slow, but gives a precise measure of the ratio of distinct entities to overall entity versions.
+В общем, таблицы, в которых количество отдельных объектов составляет менее 1 % от общего количества версий строк/объектов, являются хорошими кандидатами на оптимизацию по аналогии с учетными записями. Если выходные данные `graphman stats show` указывают на то, что эта оптимизация может принести пользу таблице, запуск `graphman stats show` произведёт полный расчет таблицы. Этот процесс может быть медленным, но обеспечит точную степень соотношения отдельных объектов к общему количеству версий объекта.

-Once a table has been determined to be account-like, running `graphman stats account-like .` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity` in large numbers, taking several seconds. In that case, the optimization needs to be turned off again.
+Как только таблица будет определена как учетная запись, запуск `graphman stats account-like .`, включит оптимизацию, подобную учетной записи, для запросов к этой таблице. Оптимизацию можно снова отключить с помощью `graphman stats account-like --clear .
`. Нодам запроса требуется до 5 минут, чтобы заметить, что оптимизация включена или выключена. После включения оптимизации необходимо убедиться, что изменение фактически не приводит к замедлению запросов к этой таблице. Если Вы настроили Grafana для мониторинга Postgres, медленные запросы будут отображаться в `pg_stat_activity` в больших количествах, это займет несколько секунд. В этом случае оптимизацию необходимо снова отключить. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### Удаление подграфов +#### Removing Subgraphs > Это новый функционал, который будет доступен в Graph Node 0.29.x -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 9e6291704a6987a0d335e312212f96160eb68924 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:56 -0500 Subject: [PATCH 0109/1789] New translations graph-node.mdx (Swedish) --- .../pages/sv/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/sv/indexing/tooling/graph-node.mdx b/website/src/pages/sv/indexing/tooling/graph-node.mdx index e53a127b3fcd..0e6241f265fc 100644 --- a/website/src/pages/sv/indexing/tooling/graph-node.mdx +++ b/website/src/pages/sv/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: Graf Node --- -Graf Node är komponenten som indexerar subgraffar och gör den resulterande datan tillgänglig för förfrågan via en GraphQL API. Som sådan är den central för indexeringsstacken, och korrekt drift av Graph Node är avgörande för att driva en framgångsrik indexerare. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## Graf Node -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. 
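As a practical aside to the line above, indexed data is served over the GraphQL HTTP port (8000 by default). A minimal sketch, assuming a local node and a hypothetical Subgraph name:

```bash
# Query a locally served Subgraph for its latest indexed block and error status.
# "org/example-subgraph" is a placeholder name, not a real deployment.
curl -s -X POST http://localhost:8000/subgraphs/name/org/example-subgraph \
  -H 'Content-Type: application/json' \
  -d '{"query": "{ _meta { block { number } hasIndexingErrors } }"}'
```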
Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL-databas -Huvudlagret för Graph Node, här lagras subgrafdata, liksom metadata om subgraffar och nätverksdata som är oberoende av subgraffar, som blockcache och eth_call-cache. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### Nätverkskunder För att indexera ett nätverk behöver Graf Node åtkomst till en nätverksklient via ett EVM-kompatibelt JSON-RPC API. Denna RPC kan ansluta till en enda klient eller så kan det vara en mer komplex konfiguration som lastbalanserar över flera. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS-noder -Metadata för distribution av subgraffar lagras på IPFS-nätverket. Graf Node har främst åtkomst till IPFS-noden under distributionen av subgraffar för att hämta subgrafens manifest och alla länkade filer. Nätverksindexerare behöver inte värd sin egen IPFS-nod. En IPFS-nod för nätverket är värd på https://ipfs.network.thegraph.com. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. ### Prometheus server för mätvärden @@ -77,19 +77,19 @@ A complete Kubernetes example configuration can be found in the [indexer reposit När Graph Node är igång exponerar den följande portar: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## Avancerad konfiguration av Graf Node -På sitt enklaste sätt kan Graph Node användas med en enda instans av Graph Node, en enda PostgreSQL-databas, en IPFS-nod och nätverksklienter som krävs av de subgrafer som ska indexeras. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. @@ -114,13 +114,13 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https: #### Flera Grafnoder -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > Observera att flera Graph Nodes alla kan konfigureras att använda samma databas, som i sig kan skalas horisontellt via sharding. #### Regler för utplacering -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
Exempel på konfiguration av deployeringsregler: @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -167,11 +167,11 @@ Alla noder vars --node-id matchar reguljärt uttryck kommer att konfigureras fö För de flesta användningsfall är en enda Postgres-databas tillräcklig för att stödja en graph-node-instans. När en graph-node-instans växer utöver en enda Postgres-databas är det möjligt att dela upp lagringen av graph-node-data över flera Postgres-databaser. Alla databaser tillsammans bildar lagringsutrymmet för graph-node-instansen. Varje individuell databas kallas en shard. -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. Sharding blir användbart när din befintliga databas inte kan hålla jämna steg med belastningen som Graph Node sätter på den och när det inte längre är möjligt att öka databasens storlek. -> Det är generellt sett bättre att göra en enda databas så stor som möjligt innan man börjar med shards. Ett undantag är när frågetrafiken är mycket ojämnt fördelad mellan subgrafer; i dessa situationer kan det hjälpa dramatiskt om högvolymsubgraferna hålls i en shard och allt annat i en annan, eftersom den konfigurationen gör det mer troligt att data för högvolymsubgraferna stannar i databasens interna cache och inte ersätts av data som inte behövs lika mycket från lågvolymsubgrafer. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. När det gäller att konfigurera anslutningar, börja med max_connections i postgresql.conf som är inställt på 400 (eller kanske till och med 200) och titta på Prometheus-metrarna store_connection_wait_time_ms och store_connection_checkout_count. Märkbara väntetider (något över 5 ms) är en indikation på att det finns för få anslutningar tillgängliga; höga väntetider beror också på att databasen är mycket upptagen (som hög CPU-belastning). Om databasen verkar annars stabil, indikerar höga väntetider att antalet anslutningar behöver ökas. I konfigurationen är det en övre gräns för hur många anslutningar varje graph-node-instans kan använda, och Graph Node kommer inte att hålla anslutningar öppna om det inte behöver dem. 
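A hedged illustration of the monitoring advice above: the two pool metrics mentioned can be read straight off the Prometheus endpoint, assuming the default metrics port 8040 on localhost.

```bash
# Check connection-pool wait times and checkouts on a running graph-node instance.
curl -s http://localhost:8040/metrics \
  | grep -E 'store_connection_wait_time_ms|store_connection_checkout_count'
```

Sustained wait times above roughly 5 ms, as noted above, suggest the pool is too small or the database itself is overloaded.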
@@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### Stöd för flera nätverk -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Flera nätverk - Flera leverantörer per nätverk (detta kan göra det möjligt att dela upp belastningen mellan leverantörer, och kan också möjliggöra konfiguration av fullständiga noder samt arkivnoder, där Graph Node föredrar billigare leverantörer om en viss arbetsbelastning tillåter det). @@ -225,11 +225,11 @@ Användare som driver en skalad indexering med avancerad konfiguration kan dra n ### Hantera Graf Noder -Med en körande Graph Node (eller Graph Nodes!) är utmaningen sedan att hantera distribuerade subgrafer över dessa noder. Graph Node erbjuder en rad verktyg för att hjälpa till med hanteringen av subgrafer. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### Loggning -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). @@ -247,11 +247,11 @@ The graphman command is included in the official containers, and you can docker Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` -### Arbeta med undergrafer +### Working with Subgraphs #### Indexerings status API -Tillgänglig som standard på port 8030/graphql, exponerar indexeringstatus-API: en en rad metoder för att kontrollera indexeringstatus för olika subgrafer, kontrollera bevis för indexering, inspektera subgrafegenskaper och mer. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). @@ -263,7 +263,7 @@ Det finns tre separata delar av indexeringsprocessen: - Bearbeta händelser i rätt ordning med lämpliga hanterare (detta kan innebära att kedjan anropas för status och att data hämtas från lagret) - Skriva de resulterande data till butiken -Dessa stadier är pipelinerade (det vill säga de kan utföras parallellt), men de är beroende av varandra. När subgrafer är långsamma att indexera beror orsaken på den specifika subgrafgen. 
+These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. Vanliga orsaker till indexeringslångsamhet: @@ -276,24 +276,24 @@ Vanliga orsaker till indexeringslångsamhet: - Leverantören själv faller bakom kedjehuvudet - Långsamhet vid hämtning av nya kvitton från leverantören vid kedjehuvudet -Subgrafindexeringsmetriker kan hjälpa till att diagnostisera grunden till indexeringens långsamhet. I vissa fall ligger problemet med subgrafgenen själv, men i andra fall kan förbättrade nätverksleverantörer, minskad databaskonflikt och andra konfigurationsförbättringar markant förbättra indexeringens prestanda. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### Undergrafer som misslyckats +#### Failed Subgraphs -Under indexering kan subgrafer misslyckas om de stöter på data som är oväntad, om någon komponent inte fungerar som förväntat eller om det finns något fel i händelsehanterare eller konfiguration. Det finns två allmänna typer av misslyckande: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Deterministiska fel: detta är fel som inte kommer att lösas med retries - Icke-deterministiska fel: dessa kan bero på problem med leverantören eller något oväntat Graph Node-fel. När ett icke-deterministiskt fel inträffar kommer Graph Node att försöka igen med de felande hanterarna och backa över tid. -I vissa fall kan ett misslyckande vara lösbart av indexören (till exempel om felet beror på att det inte finns rätt typ av leverantör, kommer att tillåta indexering att fortsätta om den nödvändiga leverantören läggs till). Men i andra fall krävs en ändring i subgrafkoden. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Blockera och anropa cache -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. 
+Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. Om en blockcache-inkonsekvens misstänks, som att en tx-kvitto saknar händelse: @@ -304,7 +304,7 @@ Om en blockcache-inkonsekvens misstänks, som att en tx-kvitto saknar händelse: #### Fråga frågor och fel -När en subgraf har indexeras kan indexörer förvänta sig att servera frågor via subgrafens dedikerade frågendpunkt. Om indexören hoppas på att betjäna en betydande mängd frågor rekommenderas en dedikerad frågenod, och vid mycket höga frågevolymer kan indexörer vilja konfigurera replikskivor så att frågor inte påverkar indexeringsprocessen. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. Men även med en dedikerad frågenod och repliker kan vissa frågor ta lång tid att utföra, och i vissa fall öka minnesanvändningen och negativt påverka frågetiden för andra användare. @@ -316,7 +316,7 @@ Graph Node caches GraphQL queries by default, which can significantly reduce dat ##### Analyserar frågor -Problematiska frågor dyker oftast upp på ett av två sätt. I vissa fall rapporterar användare själva att en viss fråga är långsam. I det fallet är utmaningen att diagnostisera orsaken till långsamheten - om det är ett generellt problem eller specifikt för den subgraf eller fråga. Och naturligtvis att lösa det om det är möjligt. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. I andra fall kan utlösaren vara hög minnesanvändning på en frågenod, i vilket fall utmaningen först är att identifiera frågan som orsakar problemet. @@ -336,10 +336,10 @@ In general, tables where the number of distinct entities are less than 1% of the Once a table has been determined to be account-like, running `graphman stats account-like .
` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### Ta bort undergrafer +#### Removing Subgraphs > Detta är ny funktionalitet, som kommer att vara tillgänglig i Graf Node 0.29.x -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From e6fe3f0fa9cc130db6715247e7e10ddb969764e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:57 -0500 Subject: [PATCH 0110/1789] New translations graph-node.mdx (Turkish) --- .../pages/tr/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/tr/indexing/tooling/graph-node.mdx b/website/src/pages/tr/indexing/tooling/graph-node.mdx index 62f5fff90afc..7bf7cd3bc3be 100644 --- a/website/src/pages/tr/indexing/tooling/graph-node.mdx +++ b/website/src/pages/tr/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: Graph Node --- -Graph Düğümü, subgraphları indeksleyen ve sonuçta oluşan verileri GraphQL API aracılığıyla sorgulanabilir hale getiren bileşendir. Bu nedenle indeksleyici yığınının merkezi bir parçasıdır ve başarılı bir indeksleyici çalıştırmak için Graph Düğümü'nün doğru şekilde çalışması çok önemlidir. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. 
+[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL veritabanı -Graph Düğümü'nün ana deposu, burada subgraph verileri yanı sıra subgraphlarla ilgili üst veriler ve blok önbelleği ve eth_call önbelleği gibi subgraphtan bağımsız ağ verileri saklanır. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### Ağ istemcileri In order to index a network, Graph Node needs access to a network client via an EVM-compatible JSON-RPC API. This RPC may connect to a single client or it could be a more complex setup that load balances across multiple. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS Düğümleri -Subgraph dağıtım üst verilerini IPFS ağında depolanır. Graph düğümü, subgraph manifestini ve tüm bağlantılı dosyaları almak için subgraph dağıtımı sırasında öncelikle IPFS düğümüne erişir. Ağ indeksleyicilerinin kendi IPFS düğümlerini barındırmaları gerekmez. Ağ için bir IPFS düğümü https://ipfs.network.thegraph.com adresinde barındırılmaktadır. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. 
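As a small sanity check related to the IPFS paragraph above, a manifest can be fetched from the hosted node directly. The hash below is a placeholder, and the `/api/v0/cat` path is the standard IPFS HTTP API, so the hosted endpoint may behave differently:

```bash
# Fetch a Subgraph manifest from the hosted IPFS node.
# Replace the Qm... hash with a real deployment ID.
curl -s -X POST "https://ipfs.network.thegraph.com/api/v0/cat?arg=QmYourDeploymentHash"
```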
### Prometheus metrik sunucusu @@ -77,19 +77,19 @@ A complete Kubernetes example configuration can be found in the [indexer reposit Graph Düğümü çalışırken aşağıdaki portları açar: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## Gelişmiş Graph Düğüm yapılandırması -En basit haliyle, Graph Düğümü tek bir Graph Düğüm örneği, bir PostgreSQL veritabanı, bir IPFS düğümü ve indekslenecek subgraphlar tarafından gerektirilen ağ istemcileri ile çalıştırılabilir. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. @@ -114,13 +114,13 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https: #### Birden Fazla Graph Düğümü -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > Birden fazla Graph Düğümü, aynı veritabanını kullanacak şekilde yapılandırılabilir ve veritabanı sharding kullanılarak yatay olarak ölçeklenebilir. #### Dağıtım kuralları -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
Örnek dağıtım kuralı yapılandırması: @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -167,11 +167,11 @@ query = "" Çoğu kullanım durumu için, tek bir Postgres veritabanı bir graph-düğümü örneğini desteklemek için yeterlidir. Bir graph-düğümü örneği tek bir Postgres veritabanından daha büyük hale geldiğinde, bu graph düğümü verilerinin depolanmasını birden fazla Postgres veritabanına yaymak mümkündür. Tüm veritabanları birlikte, graph-düğümü örneğinin deposunu oluşturur. Her tekil veritabanına bir shard denir. -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. Sharding, Graph Düğümü'nün üzerine koyduğu yükü mevcut veritabanınıza koyamadığınızda ve veritabanı boyutunu artıramayacağınızda faydalı hale gelir. -> Genellikle, shard'larla başlamadan önce tek bir veritabanını mümkün olduğunca büyük hale getirmek daha mantıklıdır. Tek bir istisna, sorgu trafiği subgraphlar arasında çokta eşit olmayan bir şekilde bölünmesidir. Bu durumda, yüksek-hacimli subgraphlar'ın bir shard'da tutulması ve geriye kalan her şeyin diğer bir shard'da tutulması, yüksek hacimli subgraphlar için verinin veritabanı dahili önbellekte kalması ve düşük hacimli subgraphlar'daki daha az ihtiyaç duyulan veriler tarafından değiştirilmemesi daha olası olduğu için çok yardımcı olabilir. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. Bağlantı yapılandırması açısından postgresql.conf'da max_connections değerinin 400 (veya belki de 200) olarak ayarlanması ve store_connection_wait_time_ms ve store_connection_checkout_count Prometheus metriklerine bakılması önerilir. Belirgin bekleme süreleri (5 milisaniye'nin üzerinde herhangi bir değer) yetersiz bağlantıların mevcut olduğunun bir işaretidir; yüksek bekleme süreleri veritabanının çok yoğun olması gibi sebeplerden de kaynaklanabilir. Ancak, veritabanı genel olarak stabil görünüyorsa, yüksek bekleme süreleri bağlantı sayısını arttırma ihtiyacını belirtir. Yapılandırmada her graph-düğümü örneğinin ne kadar bağlantı kullanabileceği bir üst sınırdır ve Graph Düğümü bunları gereksiz bulmadığı sürece açık tutmaz. 
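To ground the connection-pool guidance above, the configured Postgres limit and the current number of open connections can be compared directly. The connection parameters below (a `graph` user and database) are placeholders for whatever your setup uses:

```bash
# Compare the configured connection limit with the number of open connections.
psql -U graph -d graph -c "SHOW max_connections;"
psql -U graph -d graph -c "SELECT count(*) AS open_connections FROM pg_stat_activity;"
```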
@@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### Birden fazla ağın desteklenmesi -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Birden fazla ağ - Ağ başına birden fazla sağlayıcı (bu, yükü sağlayıcılar arasında bölme ve bir Graph Düğümü'nün deneyimsel Firehose desteği gibi daha ucuz sağlayıcıları tercih etmesi ile tam düğümlerin yanı sıra arşiv düğümlerinin yapılandırılmasına da izin verebilir). @@ -225,11 +225,11 @@ Gelişmiş yapılandırmaya sahip ölçeklendirilmiş bir dizinleme kurulumu iş ### Graph Düğümü Yönetimi -Çalışan bir Graph Düğümüne (veya Graph Düğümlerine) sahip olunduktan sonra, dağıtılan subgraplar'ın bu düğümler üzerinde yönetilmesi zorluğu ortaya çıkar. Subgraphlar'ı yönetmeye yardımcı olmak için Graph Düğümü, bir dizi araç sunar. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### Kayıt tutma -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). @@ -247,11 +247,11 @@ The graphman command is included in the official containers, and you can docker Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` -### Subgraphlarla çalışma +### Working with Subgraphs #### İndeksleme durum API'si -Varsayılan olarak 8030/graphql port'unda mevcut olan indeksleme durumu API'si, farklı subgraphlar için indeksleme durumunu ve ispatlarını kontrol etmek, subgraph özelliklerini incelemek ve daha fazlasını yapmak için çeşitli yöntemler sunar. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). @@ -263,7 +263,7 @@ The full schema is available [here](https://github.com/graphprotocol/graph-node/ - Uygun işleyicilerle sırayla olayları işleme (bu, durumu sormak için zincire çağrı yapmayı ve depodan veri getirmeyi içerebilir) - Elde edilen verileri depoya yazma -Bu aşamalar boru hattında (yani eşzamanlı olarak yürütülebilir), ancak birbirlerine bağımlıdırlar. 
Subgraphlar'ın indekslenmesi yavaş olduğunda, bunun altındaki neden spesifik subgraphlar'a bağlı olacaktır. +These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. İndeksleme yavaşlığının yaygın nedenleri: @@ -276,24 +276,24 @@ Bu aşamalar boru hattında (yani eşzamanlı olarak yürütülebilir), ancak bi - Sağlayıcının zincir başından geriye düşmesi - Sağlayıcıdan zincir başındaki yeni makbuzların alınmasındaki yavaşlık -Subgraph indeksleme metrikleri, indeksleme yavaşlığının temel nedenini teşhis etmede yardımcı olabilir. Bazı durumlarda, sorun subgraph'ın kendisiyle ilgilidir, ancak diğer durumlarda, geliştirilmiş ağ sağlayıcıları, azaltılmış veritabanı çekişmesi ve diğer yapılandırma iyileştirmeleri indeksleme performansını belirgin şekilde artırabilir. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### Başarısıız subgraphlar +#### Failed Subgraphs -İndekslemesi sırasında subgraphlar beklenmedik veri, beklendiği gibi çalışmayan bir bileşen veya olay işleyicilerinde veya yapılandırmada bir hata olması durumunda başarısız olabilir. İki genel başarısızlık türü mevcuttur: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Deterministik başarısızlıklar: Bu, yeniden denemelerle çözülmeyecek hatalardır - Deterministik olmayan başarısızlıklar: Bunlar, sağlayıcının sorunları veya beklenmedik bir Graph Düğüm hatası gibi nedenlere bağlı olabilir. Deterministik olmayan bir başarısızlık meydana geldiğinde Graph Düğümü, başarısız olan işleyicileri yeniden deneyecek ve zamanla geri çekilecektir. -Bazı durumlarda, başarısızlık indeksleyici tarafından çözülebilir (örneğin, hatanın doğru türde sağlayıcıya sahip olmamasından kaynaklanması durumunda, gerekli sağlayıcı eklenirse indeksleme devam ettirilebilir). Ancak diğer durumlarda, subgraph kodunda bir değişiklik gereklidir. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Blok ve çağrı önbelleği -Graph Node caches certain data in the store in order to save refetching from the provider. 
Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. Örneğin tx makbuzu etkinlik eksikliği gibi bir blok önbellek tutarsızlığı şüphesi varsa: @@ -304,7 +304,7 @@ However, in some instances, if an Ethereum node has provided incorrect data for #### Sorgulama sorunları ve hataları -Bir subgraph indekslendikten sonra, indeksleyiciler subgraph'ın ayrılmış sorgu son noktası aracılığıyla sorguları sunmayı bekleyebilirler. İndeksleyiciler önemli sorgu hacmi sunmayı umuyorlarsa, bunun için ayrılmış bir sorgu düğümü önerilir ve çok yüksek sorgu hacimleri durumunda indeksleyiciler sorguların indeksleme sürecini etkilememesi için replika shardlar yapılandırmak isteyebilirler. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. Bununla birlikte, özel bir sorgu düğümü ve replikalarda bile, belirli sorguların yürütülmesi uzun zaman alabilir, bazı durumlarda bellek kullanımını artırabilir ve diğer kullanıcılar için sorgu süresini olumsuz etkileyebilir. @@ -316,7 +316,7 @@ Graph Node caches GraphQL queries by default, which can significantly reduce dat ##### Sorguların analizi -Sorunlu sorgular genellikle iki şekilde ortaya çıkar. Bazı durumlarda, kullanıcılar kendileri belirli bir sorgunun yavaş olduğunu bildirirler. Bu durumda zorluk, yavaşlığın nedenini teşhis etmektir - genel bir sorun mu, yoksa subgraph'a veya sorguya özgü mü olduğunu belirlemek ve tabii ki mümkünse sonra çözmek olacaktır. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. Diğer durumlarda, tetikleyici sorgu düğümündee yüksek bellek kullanımı olabilir, bu durumda zorluk ilk olarak soruna neden olan sorguyu belirlemektir. @@ -336,10 +336,10 @@ In general, tables where the number of distinct entities are less than 1% of the Once a table has been determined to be account-like, running `graphman stats account-like .
` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### Subgraphları kaldırma +#### Removing Subgraphs > Bu, Graph Node 0.29.x sürümünde kullanılabilir olan yeni bir fonksiyonelliktir -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 9b2b8f4da4396803137c0d6d5a0cfae0cc10403f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:58 -0500 Subject: [PATCH 0111/1789] New translations graph-node.mdx (Ukrainian) --- .../pages/uk/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/uk/indexing/tooling/graph-node.mdx b/website/src/pages/uk/indexing/tooling/graph-node.mdx index 0250f14a3d08..f5778789213d 100644 --- a/website/src/pages/uk/indexing/tooling/graph-node.mdx +++ b/website/src/pages/uk/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: Graph Node --- -Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. 
Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL database -The main store for the Graph Node, this is where subgraph data is stored, as well as metadata about subgraphs, and subgraph-agnostic network data such as the block cache, and eth_call cache. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### Network clients In order to index a network, Graph Node needs access to a network client via an EVM-compatible JSON-RPC API. This RPC may connect to a single client or it could be a more complex setup that load balances across multiple. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS Nodes -Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. ### Prometheus metrics server @@ -77,19 +77,19 @@ A complete Kubernetes example configuration can be found in the [indexer reposit When it is running Graph Node exposes the following ports: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## Advanced Graph Node configuration -At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the subgraphs to be indexed. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. @@ -114,13 +114,13 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https: #### Multiple Graph Nodes -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > Note that multiple Graph Nodes can all be configured to use the same database, which itself can be horizontally scaled via sharding. #### Deployment rules -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
Example deployment rule configuration: @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -167,11 +167,11 @@ Any node whose --node-id matches the regular expression will be set up to only r For most use cases, a single Postgres database is sufficient to support a graph-node instance. When a graph-node instance outgrows a single Postgres database, it is possible to split the storage of graph-node's data across multiple Postgres databases. All databases together form the store of the graph-node instance. Each individual database is called a shard. -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. Sharding becomes useful when your existing database can't keep up with the load that Graph Node puts on it, and when it's not possible to increase the database size anymore. -> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between subgraphs; in those situations it can help dramatically if the high-volume subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume subgraphs. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. In terms of configuring connections, start with max_connections in postgresql.conf set to 400 (or maybe even 200) and look at the store_connection_wait_time_ms and store_connection_checkout_count Prometheus metrics. Noticeable wait times (anything above 5ms) is an indication that there are too few connections available; high wait times there will also be caused by the database being very busy (like high CPU load). However if the database seems otherwise stable, high wait times indicate a need to increase the number of connections. In the configuration, how many connections each graph-node instance can use is an upper limit, and Graph Node will not keep connections open if it doesn't need them. 
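For illustration, a minimal `[store]` section along the lines discussed above might look like the sketch below — assuming the `[store.<shard>]` and `replicas` syntax from the Graph Node configuration docs; the connection strings are placeholders, and the extra shard simply reuses the `sharda` name from the deployment-rule example, so verify the exact keys (`connection`, `pool_size`, `weight`) against the documentation for the version in use.

```toml
[store]
# The shard named "primary" is mandatory and stores Subgraph metadata.
[store.primary]
connection = "postgresql://graph:password@primary-db:5432/graph"
pool_size = 10

# A read replica of the primary; query load is spread across the
# primary and its replicas according to their weights.
[store.primary.replicas.repl1]
connection = "postgresql://graph:password@primary-replica:5432/graph"
weight = 1

# An additional shard, matching the `sharda` used in the deployment rules.
[store.sharda]
connection = "postgresql://graph:password@sharda-db:5432/graph"
pool_size = 10
```

Summed over every Graph Node instance that connects to a given shard, the `pool_size` values should stay within the `max_connections` budget discussed in the previous paragraph.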
@@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### Supporting multiple networks -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Multiple networks - Multiple providers per network (this can allow splitting of load across providers, and can also allow for configuration of full nodes as well as archive nodes, with Graph Node preferring cheaper providers if a given workload allows). @@ -225,11 +225,11 @@ Users who are operating a scaled indexing setup with advanced configuration may ### Managing Graph Node -Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing subgraphs. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### Logging -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). @@ -247,11 +247,11 @@ The graphman command is included in the official containers, and you can docker Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` -### Working with subgraphs +### Working with Subgraphs #### Indexing status API -Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different subgraphs, checking proofs of indexing, inspecting subgraph features and more. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). @@ -263,7 +263,7 @@ There are three separate parts of the indexing process: - Processing events in order with the appropriate handlers (this can involve calling the chain for state, and fetching data from the store) - Writing the resulting data to the store -These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where subgraphs are slow to index, the underlying cause will depend on the specific subgraph. +These stages are pipelined (i.e. 
they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. Common causes of indexing slowness: @@ -276,24 +276,24 @@ Common causes of indexing slowness: - The provider itself falling behind the chain head - Slowness in fetching new receipts at the chain head from the provider -Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### Failed subgraphs +#### Failed Subgraphs -During indexing subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Deterministic failures: these are failures which will not be resolved with retries - Non-deterministic failures: these might be down to issues with the provider, or some unexpected Graph Node error. When a non-deterministic failure occurs, Graph Node will retry the failing handlers, backing off over time. -In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Block and call cache -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node caches certain data in the store in order to save refetching from the provider. 
Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. If a block cache inconsistency is suspected, such as a tx receipt missing event: @@ -304,7 +304,7 @@ If a block cache inconsistency is suspected, such as a tx receipt missing event: #### Querying issues and errors -Once a subgraph has been indexed, indexers can expect to serve queries via the subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users. @@ -316,7 +316,7 @@ Graph Node caches GraphQL queries by default, which can significantly reduce dat ##### Analysing queries -Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that subgraph or query. And then of course to resolve it, if possible. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. In other cases, the trigger might be high memory usage on a query node, in which case the challenge is first to identify the query causing the issue. @@ -336,10 +336,10 @@ In general, tables where the number of distinct entities are less than 1% of the Once a table has been determined to be account-like, running `graphman stats account-like .
` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### Removing subgraphs +#### Removing Subgraphs > This is new functionality, which will be available in Graph Node 0.29.x -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 3f810ef61c0476bc2955adb20dc582d9f3e12a92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:09:59 -0500 Subject: [PATCH 0112/1789] New translations graph-node.mdx (Chinese Simplified) --- .../pages/zh/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/zh/indexing/tooling/graph-node.mdx b/website/src/pages/zh/indexing/tooling/graph-node.mdx index c8e88c9a82b9..a475534425cc 100644 --- a/website/src/pages/zh/indexing/tooling/graph-node.mdx +++ b/website/src/pages/zh/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: Graph 节点 --- -Graph节点是索引子图的组件,并使生成的数据可通过GraphQL API进行查询。因此,它是索引器堆栈的中心,Graph节点的正确运作对于运行成功的索引器至关重要。 +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## Graph 节点 -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. 
This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL 数据库 -Graph节点的主存储区,这是存储子图数据、子图元数据以及子图不可知的网络数据(如区块缓存和eth_call缓存)的地方。 +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### 网络客户端 为了索引网络,Graph节点需要通过以太坊兼容的JSON-RPC访问网络客户端。此RPC可能连接到单个以太坊客户端,也可能是跨多个客户端进行负载平衡的更复杂的设置。 -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS节点 -子图部署元数据存储在IPFS网络上。Graph节点主要在子图部署期间访问IPFS节点,以获取子图清单和所有链接文件。网络索引人不需要托管自己的IPFS节点。网络的IPFS节点托管于https://ipfs.network.thegraph.com。 +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. ### Prometheus指标服务器 @@ -77,19 +77,19 @@ A complete Kubernetes example configuration can be found in the [indexer reposit 当运行Graph Node时,会暴露以下端口: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## 高级 Graph 节点配置 -最简单的是,Graph节点可以使用Graph节点的单个实例、单个PostgreSQL数据库、IPFS节点和要索引的子图所需的网络客户端来操作。 +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. @@ -114,13 +114,13 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https: #### 多个 Graph 节点 -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > 请注意,可以将多个Graph节点配置为使用同一个数据库,该数据库本身可以通过分片进行水平扩展。 #### 部署规则 -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
部署规则配置示例: @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -167,11 +167,11 @@ query = "" 对于大多数用例,单个Postgres数据库足以支持graph节点实例。当一个graph节点实例超过一个Postgres数据库时,可以将graph节点的数据存储拆分到多个Postgres数据库中。所有数据库一起构成graph节点实例的存储。每个单独的数据库都称为分片。 -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. 当您的现有数据库无法跟上Graph节点给它带来的负载时,以及当无法再增加数据库大小时,分片变得非常有用。 -> 在开始使用分片之前,通常最好使单个数据库尽可能大。一个例外是查询流量在子图之间分配非常不均匀;在这些情况下,如果将高容量子图保存在一个分片中,而将其他所有内容都保存在另一个分片上,这会有很大的帮助,因为这种设置使高容量子图的数据更有可能保留在数据库内部缓存中,而不会被低容量子图中不需要的数据所取代。 +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. 在配置连接方面,首先将 postgresql.conf 中的 max_connections 设置为400(或甚至200),然后查看 store_connection_wait_time_ms 和 store_connecion_checkout_count Prometheus 度量。明显的等待时间(任何超过5ms的时间)表明可用连接太少;高等待时间也将由数据库非常繁忙(如高CPU负载)引起。然而,如果数据库在其他方面看起来很稳定,那么高等待时间表明需要增加连接数量。在配置中,每个graph节点实例可以使用的连接数是一个上限,如果不需要,Graph节点将不会保持连接打开。 @@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### 支持多个网络 -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - 多个网络。 - 每个网络有多个提供程序(这可以允许跨提供程序分配负载,也可以允许配置完整节点和归档节点,如果给定的工作负载允许,Graph Node更喜欢便宜些的提供程序)。 @@ -225,11 +225,11 @@ Graph Node supports a range of environment variables which can enable features, ### 管理Graph节点 -给定一个正在运行的 Graph 节点(或多個 Graph 节点!),接下来的挑战是如何跨这些节点管理部署的子图。Graph 节点覆盖了一系列工具,以帮助管理子图。 +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### 日志 -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. 
Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). @@ -247,11 +247,11 @@ The graphman command is included in the official containers, and you can docker Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` -### 使用子图 +### Working with Subgraphs #### 索引状态API -默认情况下,在端口8030/graphql上可用,索引状态API公开了一系列方法,用于检查不同子图的索引状态、检查索引证明、检查子图特征等。 +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). @@ -263,7 +263,7 @@ The full schema is available [here](https://github.com/graphprotocol/graph-node/ - 使用适当的处理程序按顺序处理事件(这可能涉及调用状态链,并从存储中获取数据) - 将生成的数据写入存储 -这些阶段是流水线的(即可以并行执行),但它们彼此依赖。如果子图索引速度慢,则根本原因将取决于特定的子图。 +These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. 索引速度慢的常见原因: @@ -276,24 +276,24 @@ The full schema is available [here](https://github.com/graphprotocol/graph-node/ - 提供商本身落后于链头 - 在链头从提供商获取新收据的速度较慢 -子图索引度量可以帮助诊断索引速度慢的根本原因。在某些情况下,问题在于子图本身,但在其他情况下,改进的网络提供商、减少的数据库冲突和其他配置改进可以显著提高索引性能。 +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### 失败的子图 +#### Failed Subgraphs -在为子图编制索引期间,如果遇到意外数据、某些组件未按预期工作,或者事件处理程序或配置中存在错误,则子图可能会失败。有两种常见的故障类型: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - 确定性故障:这些故障不会通过重试解决 - 非确定性故障:这些故障可能是由于提供程序的问题,或者是一些意外的Graph节点错误。当发生非确定性故障时,Graph节点将重试失败的处理程序,并随着时间的推移而后退。 -在某些情况下,索引人可能会解决故障(例如,如果错误是由于没有正确类型的提供程序导致的,则添加所需的提供程序将允许继续索引)。然而,在其他情况下,需要更改子图代码。 +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. 
+> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### 区块和调用缓存 -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. 如果怀疑区块缓存不一致,例如tx收据丢失事件: @@ -304,7 +304,7 @@ However, in some instances, if an Ethereum node has provided incorrect data for #### 查询问题和错误 -一旦子图被索引,索引器就可以期望通过子图的专用查询端点来服务查询。如果索引器希望为大量查询量提供服务,建议使用专用查询节点,如果查询量非常大,索引器可能需要配置副本分片,以便查询不会影响索引过程。 +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. 然而,即使使用专用的查询节点和副本,某些查询也可能需要很长时间才能执行,在某些情况下还会增加内存使用量,并对其他用户的查询时间产生负面影响。 @@ -316,7 +316,7 @@ Graph Node caches GraphQL queries by default, which can significantly reduce dat ##### 分析查询 -有问题的查询通常以两种方式之一出现。在某些情况下,用户自己报告给定的查询很慢。在这种情况下,挑战是诊断缓慢的原因——无论是一般问题,还是特定于子图或查询。如果可能的话,当然要解决它。 +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. 在其他情况下,触发因素可能是查询节点上的高内存使用率,在这种情况下,首要挑战是要确定导致问题的查询。 @@ -336,10 +336,10 @@ In general, tables where the number of distinct entities are less than 1% of the Once a table has been determined to be account-like, running `graphman stats account-like .
` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### 删除子图 +#### Removing Subgraphs > 这是一项新功能,将在Graph节点0.29.x中提供。 -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From c80fd89004603768bdb1477623cf451fe44238a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:00 -0500 Subject: [PATCH 0113/1789] New translations graph-node.mdx (Urdu (Pakistan)) --- .../pages/ur/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/ur/indexing/tooling/graph-node.mdx b/website/src/pages/ur/indexing/tooling/graph-node.mdx index 3e6d0c1e3d44..e2389b034a2f 100644 --- a/website/src/pages/ur/indexing/tooling/graph-node.mdx +++ b/website/src/pages/ur/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: گراف نوڈ --- -گراف نوڈ وہ جزو ہے جو سب گراف کو انڈیکس کرتا ہے، اور نتیجے میں ڈیٹا کو GraphQL API کے ذریعے کیوری کے لیے دستیاب کرتا ہے. اس طرح یہ انڈیکسر اسٹیک میں مرکزی حیثیت رکھتا ہے، اور ایک کامیاب انڈیکسر چلانے کے لیے گراف نوڈ کا درست آپریشن بہت ضروری ہے. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## گراف نوڈ -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. 
Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL ڈیٹا بیس -گراف نوڈ کا مرکزی اسٹور، یہ وہ جگہ ہے جہاں سب گراف کا ڈیٹا ذخیرہ کیا جاتا ہے، ساتھ ہی سب گراف کے بارے میں میٹا ڈیٹا، اور سب گراف-اگنوسٹک نیٹ ورک ڈیٹا جیسے کہ بلاک کیشے، اور ایتھ_کال کیشے. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### نیٹ ورک کلائنٹس کسی نیٹ ورک کو انڈیکس کرنے کے لیے، گراف نوڈ کو EVM سے مطابقت رکھنے والے JSON-RPC API کے ذریعے نیٹ ورک کلائنٹ تک رسائی کی ضرورت ہے۔ یہ RPC کسی ایک کلائنٹ سے منسلک ہو سکتا ہے یا یہ زیادہ پیچیدہ سیٹ اپ ہو سکتا ہے جو متعدد پر بیلنس لوڈ کرتا ہے. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS نوڈس -سب گراف تعیناتی کا میٹا ڈیٹا IPFS نیٹ ورک پر محفوظ کیا جاتا ہے. گراف نوڈ بنیادی طور پر سب گراف کی تعیناتی کے دوران IPFS نوڈ تک رسائی حاصل کرتا ہے تاکہ سب گراف مینی فیسٹ اور تمام منسلک فائلوں کو حاصل کیا جا سکے. نیٹ ورک انڈیکسرز کو اپنے IPFص نوڈ کو ہوسٹ کرنے کی ضرورت نہیں ہے. نیٹ ورک کے لیے ایک IPFS نوڈ https://ipfs.network.thegraph.com پر ہوسٹ کیا گیا ہے. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. ### Prometheus میٹرکس سرور @@ -77,19 +77,19 @@ A complete Kubernetes example configuration can be found in the [indexer reposit جب یہ چل رہا ہوتا ہے گراف نوڈ مندرجہ ذیل پورٹس کو بے نقاب کرتا ہے: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## اعلی درجے کی گراف نوڈ کنفیگریشن -اس کے آسان ترین طور پر، گراف نوڈ کو گراف نوڈ کے ایک انسٹینس, واحد PostgreSQL ڈیٹا بیس، ایک IPFS نوڈ، اور نیٹ ورک کلائنٹس کے ساتھ آپریٹ کیا جا سکتا ہے جیسا کہ سب گراف کو انڈیکس کرنے کے لیے ضرورت ہوتی ہے. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. @@ -114,13 +114,13 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https: #### متعدد گراف نوڈس -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > نوٹ کریں کہ ایک سے زیادہ گراف نوڈس کو ایک ہی ڈیٹا بیس کو استعمال کرنے کے لیے کنفیگر کیا جا سکتا ہے، جسے خود کو شارڈنگ کے ذریعے افقی طور پر سکیل کیا جا سکتا ہے. #### تعیناتی کے قواعد -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
 مثال کی تعیناتی کے اصول کی کنفگریشن:

@@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ]
 match = { network = [ "xdai", "poa-core" ] }
 indexers = [ "index_node_other_0" ]
 [[deployment.rule]]
-# There's no 'match', so any subgraph matches
+# There's no 'match', so any Subgraph matches
 shards = [ "sharda", "shardb" ]
 indexers = [
     "index_node_community_0",
@@ -167,11 +167,11 @@ query = ""

 زیادہ تر استعمال کے معاملات میں، ایک واحد Postgres ڈیٹا بیس گراف نوڈ کی انسٹینس کو سپورٹ کرنے کے لیے کافی ہے. جب ایک گراف نوڈ کی انسٹینس ایک واحد postgres ڈیٹا بیس سے بڑھ جاتی ہے، تو یہ ممکن ہے کہ گراف نوڈ کے ڈیٹا کے ذخیرہ کو متعدد پوسٹگریس ڈیٹا بیس میں تقسیم کیا جا سکے. تمام ڈیٹا بیس مل کر گراف نوڈ انسٹینس کا اسٹور بناتے ہیں. ہر انفرادی ڈیٹا بیس کو شارڈ کہا جاتا ہے.

-Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed.
+Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed.

 شارڈنگ مفید ہو جاتا ہے جب آپ کا موجودہ ڈیٹا بیس اس بوجھ کو برقرار نہیں رکھ سکتا جو گراف نوڈ اس پر ڈالتا ہے، اور جب ڈیٹا بیس کے سائز کو مزید بڑھانا ممکن نہ ہو.

-> عام طور پر یہ بہتر ہے کہ شارڈز کے ساتھ شروع کرنے سے پہلے، ایک ہی ڈیٹا بیس کو جتنا ہو سکے بڑا بنائیں. ایک استثناء وہ ہے جہاں کیوری ٹریفک کو سب گرافس کے درمیان بہت غیر مساوی طور پر تقسیم کیا جاتا ہے; ان حالات میں یہ بھاری طور پر مدد کر سکتا ہے اگر اعلی حجم کے سب گراف کو ایک شارڈ میں اور باقی سب کچھ دوسرے میں رکھا جائے کیونکہ اس سیٹ اپ سے یہ زیادہ امکان ہوتا ہے کہ زیادہ حجم والے سب گرافس کا ڈیٹا db-internal کیشے میں رہتا ہے اور ایسا نہیں ہوتا ہے کہ وہ کم حجم والے سب گرافس سے ڈیٹا کی جگہ لے لیں جس کی ضرورت نہیں ہے.
+> It is generally better to make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs.

 کنکشن کنفیگر کرنے کے معاملے میں، postgresql.conf میں max_connections کے ساتھ شروع کریں 400 سیٹ کریں(یا شاید 200 بھی) اور store_connection_wait_time_ms اور store_connection_checkout_count Prometheus میٹرکس دیکھیں. قابل توجہ انتظار کے اوقات (5ms سے اوپر کی کوئی بھی چیز) اس بات کا اشارہ ہے کہ بہت کم کنکشن دستیاب ہیں; زیادہ انتظار کا وقت بھی ڈیٹا بیس کے بہت مصروف ہونے کی وجہ سے ہوگا (جیسے زیادہ CPU لوڈ). تاہم اگر ڈیٹا بیس بصورت دیگر مستحکم معلوم ہوتا ہے تو، زیادہ انتظار کے اوقات کنکشن کی تعداد بڑھانے کی ضرورت کی نشاندہی کرتے ہیں. کنفیگریشن میں، ہر گراف نوڈ انسٹینس کتنے کنکشن استعمال کر سکتا ہے ایک بالائی حد ہے، اور اگر گراف نوڈ کو ان کی ضرورت نہ ہو تو کنکشن کو کھلا نہیں رکھے گا.
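The per-database connection pool described above is set per shard in `config.toml`. A minimal sketch follows; the shard names, connection strings and pool sizes are placeholders for illustration only, not values taken from this patch:

```toml
# Minimal sketch - hypothetical shard names, URLs and pool sizes.
# Size the pools so the total across all graph-node instances stays
# below max_connections on the Postgres side.
[store.primary]
connection = "postgresql://graph:secret@primary-db:5432/graph"
pool_size = 50

[store.vip]
connection = "postgresql://graph:secret@vip-db:5432/graph"
pool_size = 20
```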
@@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### متعدد نیٹ ورکس کو سپورٹ کرنا -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - متعدد نیٹ ورکس - ایک سے زیادہ فراہم کنندگان فی نیٹ ورک (یہ فراہم کنندگان میں بوجھ کو تقسیم کرنے کی اجازت دے سکتا ہے، اور مکمل نوڈس کے ساتھ ساتھ آرکائیو نوڈس کی ترتیب کی بھی اجازت دے سکتا ہے، گراف نوڈ سستے فراہم کنندگان کو ترجیح دیتا ہے اگر کام کا بوجھ اجازت دیتا ہے). @@ -225,11 +225,11 @@ Graph Node supports a range of environment variables which can enable features, ### گراف نوڈ کا انتظام -چلتے ہوئے گراف نوڈ (یا گراف نوڈس!) کو دیکھتے ہوئے، پھر چیلنج یہ ہے کہ ان نوڈس میں تعینات سب گراف کا انتظام کرنا. گراف نوڈ سب گرافس کو منظم کرنے میں مدد کے لیے ٹولز کی ایک رینج پیش کرتا ہے. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### لاگنگ -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). @@ -247,11 +247,11 @@ The graphman command is included in the official containers, and you can docker Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` -### سب گرافس کے ساتھ کام کرنا +### Working with Subgraphs #### انڈیکسنگ اسٹیٹس API -پورٹ 8030/graphql پر بطور ڈیفالٹ دستیاب ہے، انڈیکسنگ اسٹیٹس API مختلف سب گرافس کے لیے انڈیکسنگ کی حیثیت کو جانچنے، انڈیکسنگ کے ثبوتوں کی جانچ، سب گراف کی خصوصیات کا معائنہ کرنے اور مزید بہت سے طریقوں کو ظاہر کرتا ہے. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). @@ -263,7 +263,7 @@ The full schema is available [here](https://github.com/graphprotocol/graph-node/ - مناسب ہینڈلرز کے ساتھ ایوینٹس پر کارروائی کرنا (اس میں سٹیٹ کے لیے چین کو کال کرنا، اور اسٹور سے ڈیٹا حاصل کرنا شامل ہو سکتا ہے) - نتیجے کے ڈیٹا کو اسٹور پر لکھنا -یہ مراحل پائپ لائنڈ ہیں (یعنی انہیں متوازی طور پر انجام دیا جا سکتا ہے)، لیکن وہ ایک دوسرے پر منحصر ہیں. جہاں سب گراف انڈیکس میں سست ہیں، بنیادی وجہ مخصوص سب گراف پر منحصر ہوگی. +These stages are pipelined (i.e. 
they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. انڈیکسنگ میں سستی کی عام وجوہات: @@ -276,24 +276,24 @@ The full schema is available [here](https://github.com/graphprotocol/graph-node/ - فراہم کنندہ خود چین ہیڈ کے پیچھے پڑا ہے - فراہم کنندہ سے چین ہیڈ پر نئی رسیدیں لانے میں سست روی -سب گراف انڈیکسنگ میٹرکس انڈیکسنگ کی سستی کی بنیادی وجہ کی تشخیص میں مدد کر سکتی ہے. کچھ معاملات میں، مسئلہ خود سب گراف کے ساتھ ہوتا ہے، لیکن دوسروں میں، بہتر نیٹ ورک فراہم کرنے والے، ڈیٹا بیس کے تنازعہ میں کمی اور دیگر ترتیب میں بہتری انڈیکسنگ کی کارکردگی کو نمایاں طور پر بہتر بنا سکتی ہے. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### ناکام سب گراف +#### Failed Subgraphs -انڈیکسنگ کے دوران سب گرافس ناکام ہو سکتے ہیں، اگر وہ غیر متوقع ڈیٹا کا سامنا کرتے ہیں، کچھ جزو توقع کے مطابق کام نہیں کر رہا ہے، یا اگر ایونٹ ہینڈلرز یا کنفیگریشن میں کچھ بگ ہے۔ ناکامی کی دو عمومی قسمیں ہیں: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - تعییناتی ناکامیاں: یہ وہ ناکامیاں ہیں جو دوبارہ کوششوں سے حل نہیں ہوں گی - غیر مقررہ ناکامیاں: یہ فراہم کنندہ کے ساتھ مسائل، یا کچھ غیر متوقع گراف نوڈ کی خرابی کی وجہ سے ہوسکتی ہیں. جب ایک غیر مقررہ ناکامی واقع ہوتی ہے تو، گراف نوڈ ناکام ہونے والے ہینڈلرز کو دوبارہ کوشش کرے گا، وقت کے ساتھ پیچھے ہٹتا ہے. -بعض صورتوں میں ایک ناکامی کو انڈیکسر کے ذریعے حل کیا جا سکتا ہے (مثال کے طور پر اگر غلطی صحیح قسم کا فراہم کنندہ نہ ہونے کا نتیجہ ہے، مطلوبہ فراہم کنندہ کو شامل کرنے سے انڈیکسنگ جاری رہے گی). تاہم دوسری صورتوں میں، سب گراف کوڈ میں تبدیلی کی ضرورت ہے. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### کیشے -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node caches certain data in the store in order to save refetching from the provider. 
Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. اگر کسی بلاک کیشے کی عدم مطابقت کا شبہ ہے، جیسے کہ tx رسید غائب ہونے کا ایوینٹ: @@ -304,7 +304,7 @@ However, in some instances, if an Ethereum node has provided incorrect data for #### مسائل اور غلطیوں کو کیوری کرنا -ایک بار ایک سب گراف کو انڈیکس کرنے کے بعد، انڈیکسرز سب گراف کے وقف کردہ کیوری کے اختتامی نقطہ کے ذریعے کیوریز پیش کرنے کی توقع کر سکتے ہیں. اگر انڈیکسر کافی تعداد میں کیوریز کے حجم کو پیش کرنے کی امید کر رہا ہے تو، ایک وقف شدہ کیوری نوڈ کی تجویز کی جاتی ہے، اور بہت زیادہ کیوریز کی تعداد کی صورت میں، انڈیکسر نقل شارڈز کو ترتیب دینا چاہیں گے تاکہ کیوریز انڈیکسنگ کے عمل کو متاثر نہ کریں. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. تاہم، ایک وقف شدہ کیوری نوڈ اور نقل کے ساتھ بھی، بعض کیوریز کو عمل میں لانے میں کافی وقت لگ سکتا ہے، اور بعض صورتوں میں میموری کے استعمال میں اضافہ ہوتا ہے اور دوسرے صارفین کے لیے کیوری کے وقت پر منفی اثر پڑتا ہے. @@ -316,7 +316,7 @@ Graph Node caches GraphQL queries by default, which can significantly reduce dat ##### کیوریز کا تجزیہ کرنا -مشکل کیوریز اکثر دو طریقوں میں سے ایک میں سامنے آتی ہیں. کچھ معاملات میں، صارفین خود رپورٹ کرتے ہیں کہ دی گئی کیوری آہستہ ہے. اس صورت میں چیلنج آہستگی کی وجہ کی تشخیص کرنا ہے ء چاہے یہ عام مسئلہ ہو، یا اس سب گراف یا کیوری کے لیے مخصوص ہو. اور پھر اگر ممکن ہو تو یقیناً اسے حل کرنا. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. دوسری صورتوں میں، مسئلہ ایک کیوری نوڈ پر زیادہ میموری کا استعمال ہو سکتا ہے، ایسی صورت میں چیلنج سب سے پہلے اس کیوری کی نشاندہی کرنا ہے جس کی وجہ سے مسئلہ ہے. @@ -336,10 +336,10 @@ In general, tables where the number of distinct entities are less than 1% of the Once a table has been determined to be account-like, running `graphman stats account-like .
` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### سب گراف کو ختم کرنا +#### Removing Subgraphs > یہ نئی فعالیت ہے، جو گراف نوڈ 0.29.x میں دستیاب ہوگی -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 7382b4ea77c52abae9af0aff036099c8460e516e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:01 -0500 Subject: [PATCH 0114/1789] New translations graph-node.mdx (Vietnamese) --- .../pages/vi/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/vi/indexing/tooling/graph-node.mdx b/website/src/pages/vi/indexing/tooling/graph-node.mdx index 0250f14a3d08..f5778789213d 100644 --- a/website/src/pages/vi/indexing/tooling/graph-node.mdx +++ b/website/src/pages/vi/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: Graph Node --- -Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. 
Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL database -The main store for the Graph Node, this is where subgraph data is stored, as well as metadata about subgraphs, and subgraph-agnostic network data such as the block cache, and eth_call cache. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### Network clients In order to index a network, Graph Node needs access to a network client via an EVM-compatible JSON-RPC API. This RPC may connect to a single client or it could be a more complex setup that load balances across multiple. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS Nodes -Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. ### Prometheus metrics server @@ -77,19 +77,19 @@ A complete Kubernetes example configuration can be found in the [indexer reposit When it is running Graph Node exposes the following ports: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - |
-| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - |
-| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - |
-| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
-| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
+| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - |
+| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - |
+| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - |
+| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
+| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |

 > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC endpoint.

 ## Advanced Graph Node configuration

-At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the subgraphs to be indexed.
+At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed.

 This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables.

@@ -114,13 +114,13 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https:

 #### Multiple Graph Nodes

-Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules).
+Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules).

 > Note that multiple Graph Nodes can all be configured to use the same database, which itself can be horizontally scaled via sharding.

 #### Deployment rules

-Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision.
+Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision.
 Example deployment rule configuration:

@@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ]
 match = { network = [ "xdai", "poa-core" ] }
 indexers = [ "index_node_other_0" ]
 [[deployment.rule]]
-# There's no 'match', so any subgraph matches
+# There's no 'match', so any Subgraph matches
 shards = [ "sharda", "shardb" ]
 indexers = [
     "index_node_community_0",
@@ -167,11 +167,11 @@ Any node whose --node-id matches the regular expression will be set up to only r

 For most use cases, a single Postgres database is sufficient to support a graph-node instance. When a graph-node instance outgrows a single Postgres database, it is possible to split the storage of graph-node's data across multiple Postgres databases. All databases together form the store of the graph-node instance. Each individual database is called a shard.

-Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed.
+Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed.

 Sharding becomes useful when your existing database can't keep up with the load that Graph Node puts on it, and when it's not possible to increase the database size anymore.

-> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between subgraphs; in those situations it can help dramatically if the high-volume subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume subgraphs.
+> It is generally better to make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs.

 In terms of configuring connections, start with max_connections in postgresql.conf set to 400 (or maybe even 200) and look at the store_connection_wait_time_ms and store_connection_checkout_count Prometheus metrics. Noticeable wait times (anything above 5ms) is an indication that there are too few connections available; high wait times there will also be caused by the database being very busy (like high CPU load). However if the database seems otherwise stable, high wait times indicate a need to increase the number of connections. In the configuration, how many connections each graph-node instance can use is an upper limit, and Graph Node will not keep connections open if it doesn't need them.
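For the dedicated query nodes referenced in the hunk header above, the configuration typically boils down to a single `[general]` rule. A minimal sketch, with a hypothetical node-id pattern that is not taken from this patch:

```toml
# Minimal sketch - the node-id pattern is an example only.
# Any graph-node started with a --node-id matching this regular expression
# is set up to serve GraphQL queries and does no indexing.
[general]
query = "query_node_.*"
```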
@@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### Supporting multiple networks -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Multiple networks - Multiple providers per network (this can allow splitting of load across providers, and can also allow for configuration of full nodes as well as archive nodes, with Graph Node preferring cheaper providers if a given workload allows). @@ -225,11 +225,11 @@ Users who are operating a scaled indexing setup with advanced configuration may ### Managing Graph Node -Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing subgraphs. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### Logging -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). @@ -247,11 +247,11 @@ The graphman command is included in the official containers, and you can docker Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` -### Working with subgraphs +### Working with Subgraphs #### Indexing status API -Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different subgraphs, checking proofs of indexing, inspecting subgraph features and more. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). @@ -263,7 +263,7 @@ There are three separate parts of the indexing process: - Processing events in order with the appropriate handlers (this can involve calling the chain for state, and fetching data from the store) - Writing the resulting data to the store -These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where subgraphs are slow to index, the underlying cause will depend on the specific subgraph. +These stages are pipelined (i.e. 
they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. Common causes of indexing slowness: @@ -276,24 +276,24 @@ Common causes of indexing slowness: - The provider itself falling behind the chain head - Slowness in fetching new receipts at the chain head from the provider -Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### Failed subgraphs +#### Failed Subgraphs -During indexing subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Deterministic failures: these are failures which will not be resolved with retries - Non-deterministic failures: these might be down to issues with the provider, or some unexpected Graph Node error. When a non-deterministic failure occurs, Graph Node will retry the failing handlers, backing off over time. -In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Block and call cache -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node caches certain data in the store in order to save refetching from the provider. 
Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. If a block cache inconsistency is suspected, such as a tx receipt missing event: @@ -304,7 +304,7 @@ If a block cache inconsistency is suspected, such as a tx receipt missing event: #### Querying issues and errors -Once a subgraph has been indexed, indexers can expect to serve queries via the subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users. @@ -316,7 +316,7 @@ Graph Node caches GraphQL queries by default, which can significantly reduce dat ##### Analysing queries -Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that subgraph or query. And then of course to resolve it, if possible. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. In other cases, the trigger might be high memory usage on a query node, in which case the challenge is first to identify the query causing the issue. @@ -336,10 +336,10 @@ In general, tables where the number of distinct entities are less than 1% of the Once a table has been determined to be account-like, running `graphman stats account-like .
` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### Removing subgraphs +#### Removing Subgraphs > This is new functionality, which will be available in Graph Node 0.29.x -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 9e2b373529d74df5d2e8a1e1c0fcbb6fd55cc9dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:02 -0500 Subject: [PATCH 0115/1789] New translations graph-node.mdx (Marathi) --- .../pages/mr/indexing/tooling/graph-node.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/mr/indexing/tooling/graph-node.mdx b/website/src/pages/mr/indexing/tooling/graph-node.mdx index 30595816e62c..a85367e1c773 100644 --- a/website/src/pages/mr/indexing/tooling/graph-node.mdx +++ b/website/src/pages/mr/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: आलेख नोड --- -Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## आलेख नोड -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. 
Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL डेटाबेस -The main store for the Graph Node, this is where subgraph data is stored, as well as metadata about subgraphs, and subgraph-agnostic network data such as the block cache, and eth_call cache. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### नेटवर्क क्लायंट In order to index a network, Graph Node needs access to a network client via an EVM-compatible JSON-RPC API. This RPC may connect to a single client or it could be a more complex setup that load balances across multiple. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### आयपीएफएस नोड्स -Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. ### प्रोमिथियस मेट्रिक्स सर्व्हर @@ -77,19 +77,19 @@ A complete Kubernetes example configuration can be found in the [indexer reposit When it is running Graph Node exposes the following ports: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - |
-| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - |
-| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - |
-| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
-| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
+| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - |
+| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - |
+| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - |
+| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
+| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |

 > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC endpoint.

 ## प्रगत ग्राफ नोड कॉन्फिगरेशन

-At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the subgraphs to be indexed.
+At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed.

 This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables.

@@ -114,13 +114,13 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https:

 #### एकाधिक ग्राफ नोड्स

-Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules).
+Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules).

 > Note that multiple Graph Nodes can all be configured to use the same database, which itself can be horizontally scaled via sharding.

 #### डिप्लॉयमेंट नियम

-Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision.
+Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision.
उपयोजन नियम कॉन्फिगरेशनचे उदाहरण: @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -167,11 +167,11 @@ Any node whose --node-id matches the regular expression will be set up to only r For most use cases, a single Postgres database is sufficient to support a graph-node instance. When a graph-node instance outgrows a single Postgres database, it is possible to split the storage of graph-node's data across multiple Postgres databases. All databases together form the store of the graph-node instance. Each individual database is called a shard. -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. Sharding becomes useful when your existing database can't keep up with the load that Graph Node puts on it, and when it's not possible to increase the database size anymore. -> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between subgraphs; in those situations it can help dramatically if the high-volume subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume subgraphs. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. In terms of configuring connections, start with max_connections in postgresql.conf set to 400 (or maybe even 200) and look at the store_connection_wait_time_ms and store_connection_checkout_count Prometheus metrics. Noticeable wait times (anything above 5ms) is an indication that there are too few connections available; high wait times there will also be caused by the database being very busy (like high CPU load). However if the database seems otherwise stable, high wait times indicate a need to increase the number of connections. In the configuration, how many connections each graph-node instance can use is an upper limit, and Graph Node will not keep connections open if it doesn't need them. 
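The `ingestor` setting named in the hunk header just below is essentially the whole of the dedicated block ingestion configuration. A minimal sketch, reusing the node name that appears in that hunk header:

```toml
# Minimal sketch - one node is named as the block ingestor so that only a
# single graph-node instance polls the chain head for new blocks.
[chains]
ingestor = "block_ingestor_node"
```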
@@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### एकाधिक नेटवर्क समर्थन -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - एकाधिक नेटवर्क - Multiple providers per network (this can allow splitting of load across providers, and can also allow for configuration of full nodes as well as archive nodes, with Graph Node preferring cheaper providers if a given workload allows). @@ -225,11 +225,11 @@ Users who are operating a scaled indexing setup with advanced configuration may ### ग्राफ नोडचे व्यवस्थापन -Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing subgraphs. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### लॉगिंग -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). @@ -247,11 +247,11 @@ The graphman command is included in the official containers, and you can docker Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` -### सबग्राफसह कार्य करणे +### Working with Subgraphs #### अनुक्रमणिका स्थिती API -Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different subgraphs, checking proofs of indexing, inspecting subgraph features and more. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). @@ -263,7 +263,7 @@ There are three separate parts of the indexing process: - Processing events in order with the appropriate handlers (this can involve calling the chain for state, and fetching data from the store) - परिणामी डेटा स्टोअरमध्ये लिहित आहे -These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where subgraphs are slow to index, the underlying cause will depend on the specific subgraph. +These stages are pipelined (i.e. 
they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. अनुक्रमणिका मंद होण्याची सामान्य कारणे: @@ -276,24 +276,24 @@ These stages are pipelined (i.e. they can be executed in parallel), but they are - प्रदाता स्वतः साखळी डोके मागे घसरण - Slowness in fetching new receipts at the chain head from the provider -Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### अयशस्वी सबग्राफ +#### Failed Subgraphs -During indexing subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Deterministic failures: these are failures which will not be resolved with retries - Non-deterministic failures: these might be down to issues with the provider, or some unexpected Graph Node error. When a non-deterministic failure occurs, Graph Node will retry the failing handlers, backing off over time. -In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### ब्लॉक आणि कॉल कॅशे -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node caches certain data in the store in order to save refetching from the provider. 
Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. If a block cache inconsistency is suspected, such as a tx receipt missing event: @@ -304,7 +304,7 @@ If a block cache inconsistency is suspected, such as a tx receipt missing event: #### समस्या आणि त्रुटींची चौकशी करणे -Once a subgraph has been indexed, indexers can expect to serve queries via the subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users. @@ -316,7 +316,7 @@ Graph Node caches GraphQL queries by default, which can significantly reduce dat ##### प्रश्नांचे विश्लेषण करत आहे -Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that subgraph or query. And then of course to resolve it, if possible. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. In other cases, the trigger might be high memory usage on a query node, in which case the challenge is first to identify the query causing the issue. @@ -336,10 +336,10 @@ In general, tables where the number of distinct entities are less than 1% of the Once a table has been determined to be account-like, running `graphman stats account-like .
` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### सबग्राफ काढून टाकत आहे +#### Removing Subgraphs > This is new functionality, which will be available in Graph Node 0.29.x -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 8220f7ce158b23e883f7246035857cc6340f5856 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:04 -0500 Subject: [PATCH 0116/1789] New translations graph-node.mdx (Hindi) --- .../pages/hi/indexing/tooling/graph-node.mdx | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/hi/indexing/tooling/graph-node.mdx b/website/src/pages/hi/indexing/tooling/graph-node.mdx index 9acca5cf6557..ccf764a95fc2 100644 --- a/website/src/pages/hi/indexing/tooling/graph-node.mdx +++ b/website/src/pages/hi/indexing/tooling/graph-node.mdx @@ -2,31 +2,31 @@ title: ग्राफ-नोड --- -Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. ग्राफ-नोड का संदर्भ और indexers के लिए उपलब्ध कुछ उन्नत विकल्पों का परिचय प्रदान करता है। विस्तृत दस्तावेज़ और निर्देश [Graph Node repository](https://github.com/graphprotocol/graph-node) में पाए जा सकते हैं। ## ग्राफ-नोड -[Graph Node](https://github.com/graphprotocol/graph-node) The Graph Network पर सबग्राफ को indexing करने के लिए रेफरेंस इंप्लीमेंटेशन है, जो ब्लॉकचेन क्लाइंट्स से जुड़ता है, सबग्राफ को indexing करता है और इंडेक्स किए गए डेटा को queries के लिए उपलब्ध कराता है। +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. 
Graph Node (और पूरा indexer stack) को bare metal पर या एक cloud environment में चलाया जा सकता है। The Graph Protocol की मजबूती के लिए केंद्रीय indexing घटक की यह लचीलापन बहुत महत्वपूर्ण है। इसी तरह, ग्राफ-नोड को [साधन से बनाया जा सकता](https://github.com/graphprotocol/graph-node) है, या indexers [प्रदत्त Docker Images](https://hub.docker.com/r/graphprotocol/graph-node) में से एक का उपयोग कर सकते हैं। ### पोस्टग्रेएसक्यूएल डेटाबेस -The main store for the Graph Node, this is where subgraph data is stored, as well as metadata about subgraphs, and subgraph-agnostic network data such as the block cache, and eth_call cache. +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. ### नेटवर्क क्लाइंट किसी नेटवर्क को इंडेक्स करने के लिए, ग्राफ़ नोड को एथेरियम-संगत JSON-RPC के माध्यम से नेटवर्क क्लाइंट तक पहुंच की आवश्यकता होती है। यह आरपीसी एक एथेरियम क्लाइंट से जुड़ सकता है या यह एक अधिक जटिल सेटअप हो सकता है जो कई में संतुलन लोड करता है। -कुछ सबग्राफ को केवल एक पूर्ण नोड की आवश्यकता हो सकती है, लेकिन कुछ में indexing फीचर्स होते हैं, जिनके लिए अतिरिक्त RPC कार्यक्षमता की आवश्यकता होती है। विशेष रूप से, ऐसे सबग्राफ जो indexing के हिस्से के रूप में `eth_calls` करते हैं, उन्हें एक आर्काइव नोड की आवश्यकता होगी जो [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898) को सपोर्ट करता हो। साथ ही, ऐसे सबग्राफ जिनमें `callHandlers` या `blockHandlers` के साथ एक `call` फ़िल्टर हो, उन्हें `trace_filter` सपोर्ट की आवश्यकता होती है ([trace module documentation यहां देखें](https://openethereum.github.io/JSONRPC-trace-module))। +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **नेटवर्क फायरहोज़** - फायरहोज़ एक gRPC सेवा है जो ब्लॉक्स का क्रमबद्ध, फिर भी फोर्क-अवेयर स्ट्रीम प्रदान करती है। इसे The Graph के कोर डेवलपर्स द्वारा बड़े पैमाने पर प्रभावी indexing का समर्थन करने के लिए विकसित किया गया है। यह वर्तमान में Indexer के लिए अनिवार्य नहीं है, लेकिन Indexers को इस तकनीक से परिचित होने के लिए प्रोत्साहित किया जाता है ताकि वे नेटवर्क के पूर्ण समर्थन के लिए तैयार रहें। फायरहोज़ के बारे में अधिक जानें [यहां](https://firehose.streamingfast.io/)। ### आईपीएफएस नोड्स -Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. 
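Referring back to the RPC requirements above, a rough way to probe whether a provider exposes the needed functionality is to send the relevant JSON-RPC calls by hand. This is only a sketch: `$RPC_URL` is a placeholder, the call parameters are illustrative, and a provider without support will typically answer with an error instead of a result.

```sh
# EIP-1898: eth_call accepting a block *object* rather than a plain block tag.
curl -s -X POST -H 'Content-Type: application/json' "$RPC_URL" \
  -d '{"jsonrpc":"2.0","id":1,"method":"eth_call","params":[{"to":"0x0000000000000000000000000000000000000000","data":"0x"},{"blockNumber":"0x1"}]}'

# trace_filter: required for callHandlers and call-filtered blockHandlers.
curl -s -X POST -H 'Content-Type: application/json' "$RPC_URL" \
  -d '{"jsonrpc":"2.0","id":2,"method":"trace_filter","params":[{"fromBlock":"0x1","toBlock":"0x1"}]}'
```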
### प्रोमेथियस मेट्रिक्स सर्वर @@ -77,19 +77,19 @@ Kubernetes का एक पूर्ण उदाहरण कॉन्फ़ When it is running Graph Node exposes the following ports: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | -| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **प्रमुख बात**: सार्वजनिक रूप से पोर्ट्स को एक्सपोज़ करने में सावधानी बरतें - \*\*प्रशासनिक पोर्ट्स को लॉक रखना चाहिए। इसमें ग्राफ नोड JSON-RPC एंडपॉइंट भी शामिल है। ## Advanced Graph Node configuration -At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the subgraphs to be indexed. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. इस सेटअप को क्षैतिज रूप से स्केल किया जा सकता है, कई Graph नोड और उन Graph नोड को समर्थन देने के लिए कई डेटाबेस जोड़कर। उन्नत उपयोगकर्ता ग्राफ-नोड की कुछ क्षैतिज स्केलिंग क्षमताओं का लाभ उठाना चाह सकते हैं, साथ ही कुछ अधिक उन्नत कॉन्फ़िगरेशन विकल्पों का भी, `config.toml` फ़ाइल और ग्राफ-नोड के पर्यावरण वेरिएबल्स के माध्यम से। @@ -114,13 +114,13 @@ indexers = [ "<.. list of all indexing nodes ..>" ] #### Multiple Graph Nodes -ग्राफ-नोड indexing को क्षैतिज रूप से स्केल किया जा सकता है, कई ग्राफ-नोड instances चलाकर indexing और queries को विभिन्न नोड्स पर विभाजित किया जा सकता है। यह सरलता से किया जा सकता है, जब Graph नोड को एक अलग `node_id` के साथ शुरू किया जाता है (जैसे कि Docker Compose फ़ाइल में), जिसे फिर `config.toml` फ़ाइल में [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion) को निर्दिष्ट करने के लिए और [deployment rules](#deployment-rules) के साथ सबग्राफ को नोड्स के बीच विभाजित करने के लिए इस्तेमाल किया जा सकता है। +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). > ध्यान दें कि एक ही डेटाबेस का उपयोग करने के लिए कई ग्राफ़ नोड्स को कॉन्फ़िगर किया जा सकता है, जिसे स्वयं शार्डिंग के माध्यम से क्षैतिज रूप से बढ़ाया जा सकता है। #### Deployment rules -यहां कई Graph नोड दिए गए हैं, इसलिए नए सबग्राफ की तैनाती का प्रबंधन करना आवश्यक है ताकि एक ही subgraph को दो विभिन्न नोड द्वारा इंडेक्स न किया जाए, क्योंकि इससे टकराव हो सकता है। यह deployment नियमों का उपयोग करके किया जा सकता है, जो यह भी निर्दिष्ट कर सकते हैं कि यदि डेटाबेस sharding का उपयोग किया जा रहा है, तो subgraph का डेटा किस `shard` में स्टोर किया जाना चाहिए। Deployment नियम subgraph के नाम और उस नेटवर्क पर मिलान कर सकते हैं जिसमें तैनाती indexing हो रही है, ताकि निर्णय लिया जा सके। +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
Example deployment rule configuration: @@ -138,7 +138,7 @@ indexers = [ "index_node_kovan_0" ] match = { network = [ "xdai", "poa-core" ] } indexers = [ "index_node_other_0" ] [[deployment.rule]] -# There's no 'match', so any subgraph matches +# There's no 'match', so any Subgraph matches shards = [ "sharda", "shardb" ] indexers = [ "index_node_community_0", @@ -167,11 +167,11 @@ Any node whose --node-id matches the regular expression will be set up to only r For most use cases, a single Postgres database is sufficient to support a graph-node instance. When a graph-node instance outgrows a single Postgres database, it is possible to split the storage of graph-node's data across multiple Postgres databases. All databases together form the store of the graph-node instance. Each individual database is called a shard. -Shard का उपयोग subgraph deployments को कई डेटाबेस में विभाजित करने के लिए किया जा सकता है, और प्रतिकृति का उपयोग करके query लोड को डेटाबेस में फैलाने के लिए भी किया जा सकता है। इसमें यह कॉन्फ़िगर करना शामिल है कि प्रत्येक डेटाबेस के लिए प्रत्येक `ग्राफ-नोड` को अपने कनेक्शन पूल में कितने उपलब्ध डेटाबेस कनेक्शन रखने चाहिए। जैसे-जैसे अधिक सबग्राफ को index किया जा रहा है, यह अधिक महत्वपूर्ण होता जा रहा है। +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. शेयरिंग तब उपयोगी हो जाती है जब आपका मौजूदा डेटाबेस ग्राफ़ नोड द्वारा डाले गए भार के साथ नहीं रह सकता है, और जब डेटाबेस का आकार बढ़ाना संभव नहीं होता है। -> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between subgraphs; in those situations it can help dramatically if the high-volume subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume subgraphs. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. In terms of configuring connections, start with max_connections in postgresql.conf set to 400 (or maybe even 200) and look at the store_connection_wait_time_ms and store_connection_checkout_count Prometheus metrics. Noticeable wait times (anything above 5ms) is an indication that there are too few connections available; high wait times there will also be caused by the database being very busy (like high CPU load). However if the database seems otherwise stable, high wait times indicate a need to increase the number of connections. In the configuration, how many connections each graph-node instance can use is an upper limit, and Graph Node will not keep connections open if it doesn't need them. 
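To cross-check the Postgres side of the connection budget described above, the database itself can be inspected. A small sketch, assuming `psql` can reach the graph-node database with your own credentials:

```sh
# Compare the configured ceiling with the connections actually in use.
psql -d graph-node -c 'SHOW max_connections;'
psql -d graph-node -c 'SELECT count(*) AS open_connections FROM pg_stat_activity;'
```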
@@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### Supporting multiple networks -The Graph Protocol उन नेटवर्क्स की संख्या बढ़ा रहा है जो indexing रिवार्ड्स के लिए सपोर्टेड हैं, और ऐसे कई सबग्राफ हैं जो अनसपोर्टेड नेटवर्क्स को indexing कर रहे हैं जिन्हें एक indexer प्रोसेस करना चाहेगा। `config.toml` फ़ाइल अभिव्यक्त और लचीली कॉन्फ़िगरेशन की अनुमति देती है: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Multiple networks - Multiple providers per network (this can allow splitting of load across providers, and can also allow for configuration of full nodes as well as archive nodes, with Graph Node preferring cheaper providers if a given workload allows). @@ -225,11 +225,11 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] ### Managing Graph Node -Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing subgraphs. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. #### लॉगिंग -ग्राफ-नोड के log डिबगिंग और ग्राफ-नोड और विशिष्ट सबग्राफ के ऑप्टिमाइजेशन के लिए उपयोगी जानकारी प्रदान कर सकते हैं। ग्राफ-नोड विभिन्न log स्तरों का समर्थन करता है via `GRAPH_LOG` पर्यावरण चर, जिनमें निम्नलिखित स्तर होते हैं: error, warn, info, debug या trace। +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. GraphQL queries कैसे चल रही हैं, इस बारे में अधिक विवरण प्राप्त करने के लिए `GRAPH_LOG_QUERY_TIMING` को `gql` पर सेट करना उपयोगी हो सकता है (हालांकि इससे बड़ी मात्रा में लॉग उत्पन्न होंगे)। @@ -245,13 +245,13 @@ Indexer रिपॉजिटरी एक [example Grafana configuration](https The graphman कमांड आधिकारिक कंटेनरों में शामिल है, और आप अपने ग्राफ-नोड कंटेनर में docker exec कमांड का उपयोग करके इसे चला सकते हैं। इसके लिए एक `config.toml` फ़ाइल की आवश्यकता होती है। -`graphman` कमांड्स का पूरा दस्तावेज़ ग्राफ नोड रिपॉजिटरी में उपलब्ध है। ग्राफ नोड `/docs` में [/docs/graphman.md](https://github.com/graphprotocol/ग्राफ-नोड/blob/master/docs/graphman.md) देखें। +`graphman` कमांड्स का पूरा दस्तावेज़ ग्राफ नोड रिपॉजिटरी में उपलब्ध है। ग्राफ नोड `/docs` में [/docs/graphman.md](https://github.com/graphprotocol/ग्राफ-नोड/blob/master/docs/graphman.md) देखें। -### सबग्राफ के साथ काम करना +### Working with Subgraphs #### अनुक्रमण स्थिति एपीआई -Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different subgraphs, checking proofs of indexing, inspecting subgraph features and more. +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. 
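As an illustration, the status API can be queried with any GraphQL client; the sketch below uses `curl` against the default port with a trimmed selection of fields from the published schema:

```sh
# Ask the indexing status API (port 8030) for a summary of every deployment.
curl -s -X POST -H 'Content-Type: application/json' http://localhost:8030/graphql \
  -d '{"query":"{ indexingStatuses { subgraph synced health chains { network chainHeadBlock { number } latestBlock { number } } } }"}'
```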
पूर्ण स्कीमा [यहां](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql) उपलब्ध है। @@ -263,7 +263,7 @@ There are three separate parts of the indexing process: - उपयुक्त संचालकों के साथ घटनाओं को संसाधित करना (इसमें राज्य के लिए श्रृंखला को कॉल करना और स्टोर से डेटा प्राप्त करना शामिल हो सकता है) - Writing the resulting data to the store -These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where subgraphs are slow to index, the underlying cause will depend on the specific subgraph. +These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. Common causes of indexing slowness: @@ -276,24 +276,24 @@ Common causes of indexing slowness: - The provider itself falling behind the chain head - Slowness in fetching new receipts at the chain head from the provider -Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. -#### विफल सबग्राफ +#### Failed Subgraphs -During indexing subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Deterministic failures: these are failures which will not be resolved with retries - गैर-नियतात्मक विफलताएँ: ये प्रदाता के साथ समस्याओं या कुछ अप्रत्याशित ग्राफ़ नोड त्रुटि के कारण हो सकती हैं। जब एक गैर-नियतात्मक विफलता होती है, तो ग्राफ़ नोड समय के साथ पीछे हटते हुए विफल हैंडलर को फिर से प्रयास करेगा। -In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. -> निश्चितात्मक विफलताएँ "अंतिम" मानी जाती हैं, जिनके लिए विफल ब्लॉक के लिए एक Proof of Indexing उत्पन्न किया जाता है, जबकि अनिर्णायक विफलताएँ नहीं होतीं, क्योंकि Subgraph "अविफल" हो सकता है और indexing जारी रख सकता है। कुछ मामलों में, अनिर्णायक लेबल गलत होता है, और Subgraph कभी भी त्रुटि को पार नहीं कर पाएगा; ऐसी विफलताओं को ग्राफ नोड रिपॉजिटरी पर मुद्दों के रूप में रिपोर्ट किया जाना चाहिए। +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. 
In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### कैश को ब्लॉक और कॉल करें -ग्राफ-नोड कुछ डेटा को स्टोर में कैश करता है ताकि प्रोवाइडर से फिर से प्राप्त करने की आवश्यकता न हो। ब्लॉक्स को कैश किया जाता है, साथ ही `eth_calls` के परिणाम (जो कि एक विशिष्ट ब्लॉक से कैश किए जाते हैं)। यह कैशिंग "थोड़े बदले हुए subgraph" के दौरान indexing की गति को नाटकीय रूप से बढ़ा सकती है। +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. -यदि कभी Ethereum नोड ने किसी समय अवधि के लिए गलत डेटा प्रदान किया है, तो वह कैश में जा सकता है, जिसके परिणामस्वरूप गलत डेटा या विफल सबग्राफ हो सकते हैं। इस स्थिति में, Indexer `graphman` का उपयोग करके ज़हरीले कैश को हटा सकते हैं, और फिर प्रभावित सबग्राफ को रीवाइंड कर सकते हैं, जो फिर (आशा है) स्वस्थ प्रदाता से ताज़ा डेटा प्राप्त करेंगे। +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. If a block cache inconsistency is suspected, such as a tx receipt missing event: @@ -304,7 +304,7 @@ If a block cache inconsistency is suspected, such as a tx receipt missing event: #### Querying issues and errors -Once a subgraph has been indexed, indexers can expect to serve queries via the subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. हालाँकि, एक समर्पित क्वेरी नोड और प्रतिकृतियों के साथ भी, कुछ प्रश्नों को निष्पादित करने में लंबा समय लग सकता है, और कुछ मामलों में मेमोरी उपयोग में वृद्धि होती है और अन्य उपयोगकर्ताओं के लिए क्वेरी समय को नकारात्मक रूप से प्रभावित करती है। @@ -316,7 +316,7 @@ Once a subgraph has been indexed, indexers can expect to serve queries via the s ##### Analysing queries -Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that subgraph or query. And then of course to resolve it, if possible. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. 
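Before the high-memory case discussed next, one hedged starting point for the "reported slow query" case is the query-timing log stream mentioned in the logging section; the container name and the `query_time_ms` field below are assumptions to check against your own setup:

```sh
# Requires GRAPH_LOG_QUERY_TIMING=gql to be set in the graph-node environment
# (see the logging section). Skim the most recent timed queries.
docker logs graph-node 2>&1 | grep query_time_ms | tail -n 20
```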
अन्य मामलों में, क्वेरी नोड पर ट्रिगर उच्च मेमोरी उपयोग हो सकता है, इस मामले में सबसे पहले समस्या उत्पन्न करने वाली क्वेरी की पहचान करना चुनौती है। @@ -336,10 +336,10 @@ Database tables that store entities seem to generally come in two varieties: 'tr एक बार जब यह तय कर लिया जाता है कि एक तालिका खाता जैसी है, तो `graphman stats account-like .
` चलाने से उस तालिका के खिलाफ queries के लिए खाता जैसी अनुकूलन सक्षम हो जाएगा। इस अनुकूलन को फिर से बंद किया जा सकता है `graphman stats account-like --clear .
` के साथ। queries नोड्स को यह नोटिस करने में 5 मिनट तक का समय लग सकता है कि अनुकूलन को चालू या बंद किया गया है। अनुकूलन को चालू करने के बाद, यह सत्यापित करना आवश्यक है कि बदलाव वास्तव में उस तालिका के लिए queries को धीमा नहीं कर रहा है। यदि आपने Grafana को Postgres की निगरानी के लिए कॉन्फ़िगर किया है, तो धीमी queries `pg_stat_activity` में बड़ी संख्या में दिखाई देंगी, जो कई सेकंड ले रही हैं। ऐसे में, अनुकूलन को फिर से बंद करने की आवश्यकता होती है। -Uniswap- जैसे सबग्राफ़ के लिए, `pair` और `token` तालिकाएँ इस अनुकूलन के प्रमुख उम्मीदवार हैं, और ये डेटाबेस लोड पर नाटकीय प्रभाव डाल सकते हैं। +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. -#### सबग्राफ हटाना +#### Removing Subgraphs > This is new functionality, which will be available in Graph Node 0.29.x -किसी बिंदु पर एक indexer एक दिए गए subgraph को हटाना चाहता है। इसे आसानी से `graphman drop` के माध्यम से किया जा सकता है, जो एक deployment और उसके सभी indexed डेटा को हटा देता है। डिप्लॉयमेंट को subgraph नाम, एक IPFS हैश `Qm..`, या डेटाबेस नामस्थान `sgdNNN` के रूप में निर्दिष्ट किया जा सकता है। आगे की दस्तावेज़ीकरण यहां उपलब्ध है [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop)। +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 3df2c6bdc4a8654c7543571c9dd3c6fdc6cd41be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:05 -0500 Subject: [PATCH 0117/1789] New translations graph-node.mdx (Swahili) --- .../pages/sw/indexing/tooling/graph-node.mdx | 345 ++++++++++++++++++ 1 file changed, 345 insertions(+) create mode 100644 website/src/pages/sw/indexing/tooling/graph-node.mdx diff --git a/website/src/pages/sw/indexing/tooling/graph-node.mdx b/website/src/pages/sw/indexing/tooling/graph-node.mdx new file mode 100644 index 000000000000..f5778789213d --- /dev/null +++ b/website/src/pages/sw/indexing/tooling/graph-node.mdx @@ -0,0 +1,345 @@ +--- +title: Graph Node +--- + +Graph Node is the component which indexes Subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. + +This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). + +## Graph Node + +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing Subgraphs and making indexed data available to query. + +Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). 
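For reference, a minimal sketch of starting the published image is shown below; the environment variable names and values mirror the image's example Compose setup and should be treated as assumptions to verify against the image documentation:

```sh
# Illustrative only: point the container at your own Postgres, RPC and IPFS.
docker run -d --name graph-node \
  -p 8000:8000 -p 8020:8020 -p 8030:8030 -p 8040:8040 \
  -e postgres_host=postgres \
  -e postgres_user=graph-node \
  -e postgres_pass=let-me-in \
  -e postgres_db=graph-node \
  -e ethereum='mainnet:http://host.docker.internal:8545' \
  -e ipfs='https://ipfs.network.thegraph.com' \
  graphprotocol/graph-node:latest
```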
+ +### PostgreSQL database + +The main store for the Graph Node, this is where Subgraph data is stored, as well as metadata about Subgraphs, and Subgraph-agnostic network data such as the block cache, and eth_call cache. + +### Network clients + +In order to index a network, Graph Node needs access to a network client via an EVM-compatible JSON-RPC API. This RPC may connect to a single client or it could be a more complex setup that load balances across multiple. + +While some Subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically Subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and Subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). + +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). + +### IPFS Nodes + +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during Subgraph deployment to fetch the Subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. + +### Prometheus metrics server + +To enable monitoring and reporting, Graph Node can optionally log metrics to a Prometheus metrics server. + +### Getting started from source + +#### Install prerequisites + +- **Rust** + +- **PostgreSQL** + +- **IPFS** + +- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. + +```sh +sudo apt-get install -y clang libpq-dev libssl-dev pkg-config +``` + +#### Setup + +1. Start a PostgreSQL database server + +```sh +initdb -D .postgres +pg_ctl -D .postgres -l logfile start +createdb graph-node +``` + +2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` + +3. Now that all the dependencies are setup, start the Graph Node: + +```sh +cargo run -p graph-node --release -- \ + --postgres-url postgresql://[USERNAME]:[PASSWORD]@localhost:5432/graph-node \ + --ethereum-rpc [NETWORK_NAME]:[URL] \ + --ipfs https://ipfs.network.thegraph.com +``` + +### Getting started with Kubernetes + +A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). + +### Ports + +When it is running Graph Node exposes the following ports: + +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for Subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for Subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | + +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. + +## Advanced Graph Node configuration + +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the Subgraphs to be indexed. + +This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. + +### `config.toml` + +A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch. + +> When using a configuration file, it is not possible to use the options --postgres-url, --postgres-secondary-hosts, and --postgres-host-weights. + +A minimal `config.toml` file can be provided; the following file is equivalent to using the --postgres-url command line option: + +```toml +[store] +[store.primary] +connection="<.. postgres-url argument ..>" +[deployment] +[[deployment.rule]] +indexers = [ "<.. list of all indexing nodes ..>" ] +``` + +Full documentation of `config.toml` can be found in the [Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). + +#### Multiple Graph Nodes + +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting Subgraphs across nodes with [deployment rules](#deployment-rules). + +> Note that multiple Graph Nodes can all be configured to use the same database, which itself can be horizontally scaled via sharding. + +#### Deployment rules + +Given multiple Graph Nodes, it is necessary to manage deployment of new Subgraphs so that the same Subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a Subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the Subgraph name and the network that the deployment is indexing in order to make a decision. 
+ +Example deployment rule configuration: + +```toml +[deployment] +[[deployment.rule]] +match = { name = "(vip|important)/.*" } +shard = "vip" +indexers = [ "index_node_vip_0", "index_node_vip_1" ] +[[deployment.rule]] +match = { network = "kovan" } +# No shard, so we use the default shard called 'primary' +indexers = [ "index_node_kovan_0" ] +[[deployment.rule]] +match = { network = [ "xdai", "poa-core" ] } +indexers = [ "index_node_other_0" ] +[[deployment.rule]] +# There's no 'match', so any Subgraph matches +shards = [ "sharda", "shardb" ] +indexers = [ + "index_node_community_0", + "index_node_community_1", + "index_node_community_2", + "index_node_community_3", + "index_node_community_4", + "index_node_community_5" + ] +``` + +Read more about deployment rules [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). + +#### Dedicated query nodes + +Nodes can be configured to explicitly be query nodes by including the following in the configuration file: + +```toml +[general] +query = "" +``` + +Any node whose --node-id matches the regular expression will be set up to only respond to queries. + +#### Database scaling via sharding + +For most use cases, a single Postgres database is sufficient to support a graph-node instance. When a graph-node instance outgrows a single Postgres database, it is possible to split the storage of graph-node's data across multiple Postgres databases. All databases together form the store of the graph-node instance. Each individual database is called a shard. + +Shards can be used to split Subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more Subgraphs are being indexed. + +Sharding becomes useful when your existing database can't keep up with the load that Graph Node puts on it, and when it's not possible to increase the database size anymore. + +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between Subgraphs; in those situations it can help dramatically if the high-volume Subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume Subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume Subgraphs. + +In terms of configuring connections, start with max_connections in postgresql.conf set to 400 (or maybe even 200) and look at the store_connection_wait_time_ms and store_connection_checkout_count Prometheus metrics. Noticeable wait times (anything above 5ms) is an indication that there are too few connections available; high wait times there will also be caused by the database being very busy (like high CPU load). However if the database seems otherwise stable, high wait times indicate a need to increase the number of connections. In the configuration, how many connections each graph-node instance can use is an upper limit, and Graph Node will not keep connections open if it doesn't need them. + +Read more about store configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). 
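When experimenting with a sharded `config.toml`, it can help to validate the file and preview placements before restarting nodes. The `graphman config` subcommands, the config path and the example deployment name below are assumptions; consult `graphman --help` in your build:

```sh
# Validate the configuration file without starting Graph Node.
graphman --config /etc/graph-node/config.toml config check

# Preview which shard and index nodes a deployment would be assigned under
# the deployment rules (hypothetical Subgraph name and network).
graphman --config /etc/graph-node/config.toml config place some/subgraph mainnet
```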
+ +#### Dedicated block ingestion + +If there are multiple nodes configured, it will be necessary to specify one node which is responsible for ingestion of new blocks, so that all configured index nodes aren't polling the chain head. This is done as part of the `chains` namespace, specifying the `node_id` to be used for block ingestion: + +```toml +[chains] +ingestor = "block_ingestor_node" +``` + +#### Supporting multiple networks + +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many Subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: + +- Multiple networks +- Multiple providers per network (this can allow splitting of load across providers, and can also allow for configuration of full nodes as well as archive nodes, with Graph Node preferring cheaper providers if a given workload allows). +- Additional provider details, such as features, authentication and the type of provider (for experimental Firehose support) + +The `[chains]` section controls the ethereum providers that graph-node connects to, and where blocks and other metadata for each chain are stored. The following example configures two chains, mainnet and kovan, where blocks for mainnet are stored in the vip shard and blocks for kovan are stored in the primary shard. The mainnet chain can use two different providers, whereas kovan only has one provider. + +```toml +[chains] +ingestor = "block_ingestor_node" +[chains.mainnet] +shard = "vip" +provider = [ + { label = "mainnet1", url = "http://..", features = [], headers = { Authorization = "Bearer foo" } }, + { label = "mainnet2", url = "http://..", features = [ "archive", "traces" ] } +] +[chains.kovan] +shard = "primary" +provider = [ { label = "kovan", url = "http://..", features = [] } ] +``` + +Read more about provider configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). + +### Environment variables + +Graph Node supports a range of environment variables which can enable features, or change Graph Node behaviour. These are documented [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). + +### Continuous deployment + +Users who are operating a scaled indexing setup with advanced configuration may benefit from managing their Graph Nodes with Kubernetes. + +- The indexer repository has an [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) +- [Launchpad](https://docs.graphops.xyz/launchpad/intro) is a toolkit for running a Graph Protocol Indexer on Kubernetes maintained by GraphOps. It provides a set of Helm charts and a CLI to manage a Graph Node deployment. + +### Managing Graph Node + +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed Subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing Subgraphs. + +#### Logging + +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific Subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. + +In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). 
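A small sketch of raising the log level for a single run, assuming graph-node is started straight from a shell (with Docker or Kubernetes, the same variables go into the container environment); the credentials and endpoints are placeholders:

```sh
# More verbose logging plus per-query timing details, as described above.
export GRAPH_LOG=debug
export GRAPH_LOG_QUERY_TIMING=gql
cargo run -p graph-node --release -- \
  --postgres-url postgresql://graph-node:let-me-in@localhost:5432/graph-node \
  --ethereum-rpc mainnet:http://localhost:8545 \
  --ipfs https://ipfs.network.thegraph.com
```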
+ +#### Monitoring & alerting + +Graph Node provides the metrics via Prometheus endpoint on 8040 port by default. Grafana can then be used to visualise these metrics. + +The indexer repository provides an [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). + +#### Graphman + +`graphman` is a maintenance tool for Graph Node, helping with diagnosis and resolution of different day-to-day and exceptional tasks. + +The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. + +Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` + +### Working with Subgraphs + +#### Indexing status API + +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different Subgraphs, checking proofs of indexing, inspecting Subgraph features and more. + +The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). + +#### Indexing performance + +There are three separate parts of the indexing process: + +- Fetching events of interest from the provider +- Processing events in order with the appropriate handlers (this can involve calling the chain for state, and fetching data from the store) +- Writing the resulting data to the store + +These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where Subgraphs are slow to index, the underlying cause will depend on the specific Subgraph. + +Common causes of indexing slowness: + +- Time taken to find relevant events from the chain (call handlers in particular can be slow, given the reliance on `trace_filter`) +- Making large numbers of `eth_calls` as part of handlers +- A large amount of store interaction during execution +- A large amount of data to save to the store +- A large number of events to process +- Slow database connection time, for crowded nodes +- The provider itself falling behind the chain head +- Slowness in fetching new receipts at the chain head from the provider + +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the Subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. + +#### Failed Subgraphs + +During indexing Subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: + +- Deterministic failures: these are failures which will not be resolved with retries +- Non-deterministic failures: these might be down to issues with the provider, or some unexpected Graph Node error. When a non-deterministic failure occurs, Graph Node will retry the failing handlers, backing off over time. + +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the Subgraph code is required. 
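For example, running `graphman` inside the official container as suggested above might look like this; the container name, config path and `sgd1` namespace are placeholders for your own values:

```sh
# List the chains graph-node knows about, then show table statistics for
# one deployment (both commands are referenced elsewhere in this page).
docker exec -it graph-node graphman --config /etc/graph-node/config.toml chain list
docker exec -it graph-node graphman --config /etc/graph-node/config.toml stats show sgd1
```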
+ +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the Subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the Subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. + +#### Block and call cache + +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered Subgraph. + +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed Subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected Subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. + +If a block cache inconsistency is suspected, such as a tx receipt missing event: + +1. `graphman chain list` to find the chain name. +2. `graphman chain check-blocks by-number ` will check if the cached block matches the provider, and deletes the block from the cache if it doesn’t. + 1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate `. + 2. If the block matches the provider, then the issue can be debugged directly against the provider. + +#### Querying issues and errors + +Once a Subgraph has been indexed, indexers can expect to serve queries via the Subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. + +However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users. + +There is not one "silver bullet", but a range of tools for preventing, diagnosing and dealing with slow queries. + +##### Query caching + +Graph Node caches GraphQL queries by default, which can significantly reduce database load. This can be further configured with the `GRAPH_QUERY_CACHE_BLOCKS` and `GRAPH_QUERY_CACHE_MAX_MEM` settings - read more [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). + +##### Analysing queries + +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that Subgraph or query. And then of course to resolve it, if possible. + +In other cases, the trigger might be high memory usage on a query node, in which case the challenge is first to identify the query causing the issue. + +Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and summarize Graph Node's query logs. `GRAPH_LOG_QUERY_TIMING` can also be enabled to help identify and debug slow queries. + +Given a slow query, indexers have a few options. Of course they can alter their cost model, to significantly increase the cost of sending the problematic query. 
This may result in a reduction in the frequency of that query. However this often doesn't resolve the root cause of the issue. + +##### Account-like optimisation + +Database tables that store entities seem to generally come in two varieties: 'transaction-like', where entities, once created, are never updated, i.e., they store something akin to a list of financial transactions, and 'account-like' where entities are updated very often, i.e., they store something like financial accounts that get modified every time a transaction is recorded. Account-like tables are characterized by the fact that they contain a large number of entity versions, but relatively few distinct entities. Often, in such tables the number of distinct entities is 1% of the total number of rows (entity versions) + +For account-like tables, `graph-node` can generate queries that take advantage of details of how Postgres ends up storing data with such a high rate of change, namely that all of the versions for recent blocks are in a small subsection of the overall storage for such a table. + +The command `graphman stats show shows, for each entity type/table in a deployment, how many distinct entities, and how many entity versions each table contains. That data is based on Postgres-internal estimates, and is therefore necessarily imprecise, and can be off by an order of magnitude. A `-1` in the `entities` column means that Postgres believes that all rows contain a distinct entity. + +In general, tables where the number of distinct entities are less than 1% of the total number of rows/entity versions are good candidates for the account-like optimization. When the output of `graphman stats show` indicates that a table might benefit from this optimization, running `graphman stats show
+
+Once a table has been determined to be account-like, running `graphman stats account-like <sgdNNN>.<table>` will turn on the account-like optimization for queries against that table.
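+
+For example, with the same placeholder names as above (the `--clear` form described next undoes the change):
+
+```bash
+graphman stats account-like sgd1234.swap          # enable the optimization for one table
+graphman stats account-like --clear sgd1234.swap  # disable it again if queries get slower
+```
+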
+The optimization can be turned off again with `graphman stats account-like --clear <sgdNNN>.<table>
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. + +For Uniswap-like Subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. + +#### Removing Subgraphs + +> This is new functionality, which will be available in Graph Node 0.29.x + +At some point an indexer might want to remove a given Subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a Subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 494ddb9225839c32c62b9d187cf8ed269668750a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:06 -0500 Subject: [PATCH 0118/1789] New translations advanced.mdx (Romanian) --- .../developing/creating/advanced.mdx | 78 +++++++++---------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/creating/advanced.mdx b/website/src/pages/ro/subgraphs/developing/creating/advanced.mdx index ee9918f5f254..d19755ac0876 100644 --- a/website/src/pages/ro/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/ro/subgraphs/developing/creating/advanced.mdx @@ -4,9 +4,9 @@ title: Advanced Subgraph Features ## Overview -Add and implement advanced subgraph features to enhanced your subgraph's built. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Timeseries and Aggregations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. 
-Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. +Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## Non-fatal errors -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. 
It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## IPFS/Arweave File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. @@ -290,7 +290,7 @@ Example: import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -317,23 +317,23 @@ This will create a new file data source, which will poll Graph Node's configured This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file Congratulations, you are using file data sources! -#### Deploying your subgraphs +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### Limitations -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - Entities created by File Data Sources are immutable, and cannot be updated - File Data Source handlers cannot access entities from other file data sources - Entities associated with File Data Sources cannot be accessed by chain-based handlers -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. 
Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### How Topic Filters Work -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. @@ -401,7 +401,7 @@ In this example: #### Configuration in Subgraphs -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses @@ -452,17 +452,17 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. 
+- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Declared eth_call > Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. This feature does the following: -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Allows faster data fetching, resulting in quicker query responses and a better user experience. - Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. @@ -474,7 +474,7 @@ This feature does the following: #### Scenario without Declarative `eth_calls` -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Traditionally, these calls might be made sequentially: @@ -498,15 +498,15 @@ Total time taken = max (3, 2, 4) = 4 seconds #### How it Works -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Example Configuration in Subgraph Manifest Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Details for the example above: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
-When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. -Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. 
It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - It adds or removes entity types - It removes attributes from entity types @@ -560,4 +560,4 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o - It adds or removes interfaces - It changes for which entity types an interface is implemented -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. From 96e163d0ad05f61aa53383fee3e47c939723fc81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:07 -0500 Subject: [PATCH 0119/1789] New translations advanced.mdx (French) --- .../developing/creating/advanced.mdx | 90 +++++++++---------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/creating/advanced.mdx b/website/src/pages/fr/subgraphs/developing/creating/advanced.mdx index 12e0f444c4d8..d1df09202088 100644 --- a/website/src/pages/fr/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/fr/subgraphs/developing/creating/advanced.mdx @@ -4,17 +4,17 @@ title: Fonctionnalités avancées des subgraphs ## Aperçu -Ajoutez et implémentez des fonctionnalités avancées de subgraph pour améliorer la construction de votre subgraph. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. -À partir de `specVersion` `0.0.4`, les fonctionnalités de subgraph doivent être explicitement déclarées dans la section `features` au niveau supérieur du fichier de manifeste, en utilisant leur nom en `camelCase` comme indiqué dans le tableau ci-dessous : +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: -| Fonctionnalité | Nom | -| --------------------------------------------------------- | ---------------- | -| [Erreurs non fatales](#non-fatal-errors) | `nonFatalErrors` | -| [Recherche plein texte](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Greffage](#grafting-onto-existing-subgraphs) | `grafting` | +| Fonctionnalité | Nom | +| ----------------------------------------------------------- | ---------------- | +| [Erreurs non fatales](#non-fatal-errors) | `nonFatalErrors` | +| [Recherche plein texte](#defining-fulltext-search-fields) | `fullTextSearch` | +| [Greffage](#grafting-onto-existing-subgraphs) | `grafting` | -Par exemple, si un subgraph utilise les fonctionnalités **Full-Text Search** et **Non-fatal Errors**, le champ `features` dans le manifeste devrait être : +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Notez que L'utilisation d'une fonctionnalité sans la déclarer entraînera une **validation error** lors du déploiement du subgraph, mais aucune erreur ne se produira si une fonctionnalité est déclarée mais non utilisée. 
+> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Séries chronologiques et agrégations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. +Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Exemple de schéma @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## Erreurs non fatales -Les erreurs d'indexation sur les subgraphs déjà synchronisés entraîneront, par défaut, l'échec du subgraph et l'arrêt de la synchronisation. Les subgraphs peuvent également être configurés pour continuer la synchronisation en présence d'erreurs, en ignorant les modifications apportées par le gestionnaire qui a provoqué l'erreur. Cela donne aux auteurs de subgraphs le temps de corriger leurs subgraphs pendant que les requêtes continuent d'être traitées sur le dernier bloc, bien que les résultats puissent être incohérents en raison du bogue à l'origine de l'erreur. Notez que certaines erreurs sont toujours fatales. Pour être non fatale, l'erreur doit être connue pour être déterministe. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network ne supporte pas encore les erreurs non fatales, et les développeurs ne doivent pas déployer de subgraphs utilisant cette fonctionnalité sur le réseau via le Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -L'activation des erreurs non fatales nécessite la définition de l'indicateur de fonctionnalité suivant sur le manifeste du subgraph : +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -La requête doit également opter pour l'interrogation de données avec des incohérences potentielles via l'argument `subgraphError`. 
Il est également recommandé d'interroger `_meta` pour vérifier si le subgraph a ignoré des erreurs, comme dans l'exemple : +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -Si le subgraph rencontre une erreur, cette requête renverra à la fois les données et une erreur graphql avec le message `"indexing_error"`, comme dans cet exemple de réponse : +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ Si le subgraph rencontre une erreur, cette requête renverra à la fois les donn ## File Data Sources de fichiers IPFS/Arweave -Les sources de données de fichiers sont une nouvelle fonctionnalité de subgraph permettant d'accéder aux données hors chaîne pendant l'indexation de manière robuste et extensible. Les sources de données de fichiers prennent en charge la récupération de fichiers depuis IPFS et Arweave. +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > Cela jette également les bases d’une indexation déterministe des données hors chaîne, ainsi que de l’introduction potentielle de données arbitraires provenant de HTTP. @@ -290,7 +290,7 @@ L'exemple: import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//Cet exemple de code concerne un sous-graphe de Crypto coven. Le hachage ipfs ci-dessus est un répertoire contenant les métadonnées des jetons pour toutes les NFT de l'alliance cryptographique. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -300,7 +300,7 @@ export function handleTransfer(event: TransferEvent): void { token.tokenURI = '/' + event.params.tokenId.toString() + '.json' const tokenIpfsHash = ipfshash + token.tokenURI - //Ceci crée un chemin vers les métadonnées pour un seul Crypto coven NFT. Il concatène le répertoire avec "/" + nom de fichier + ".json" + //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" token.ipfsURI = tokenIpfsHash @@ -317,23 +317,23 @@ Cela créera une nouvelle source de données de fichier, qui interrogera le poin Cet exemple utilise le CID comme référence entre l'entité parent `Token` et l'entité résultante `TokenMetadata`. -> Auparavant, c'est à ce stade qu'un développeur de subgraphs aurait appelé `ipfs.cat(CID)` pour récupérer le fichier +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file Félicitations, vous utilisez des sources de données de fichiers ! -#### Déployer vos subgraphs +#### Deploying your Subgraphs -Vous pouvez maintenant `construire` et `déployer` votre subgraph sur n'importe quel Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. 
#### Limitations -Les entités et les gestionnaires de sources de données de fichiers sont isolés des autres entités du subgraph, ce qui garantit que leur exécution est déterministe et qu'il n'y a pas de contamination des sources de données basées sur des chaînes. Pour être plus précis : +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - Les entités créées par les sources de données de fichiers sont immuables et ne peuvent pas être mises à jour - Les gestionnaires de sources de données de fichiers ne peuvent pas accéder à des entités provenant d'autres sources de données de fichiers - Les entités associées aux sources de données de fichiers ne sont pas accessibles aux gestionnaires basés sur des chaînes -> Cette contrainte ne devrait pas poser de problème pour la plupart des cas d'utilisation, mais elle peut en compliquer certains. N'hésitez pas à nous contacter via Discord si vous rencontrez des problèmes pour modéliser vos données basées sur des fichiers dans un subgraph ! +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! En outre, il n'est pas possible de créer des sources de données à partir d'une source de données de fichier, qu'il s'agisse d'une source de données onchain ou d'une autre source de données de fichier. Cette restriction pourrait être levée à l'avenir. @@ -365,15 +365,15 @@ Les gestionnaires pour les fichiers sources de données ne peuvent pas être dan > **Nécessite** : [SpecVersion](#specversion-releases) >= `1.2.0` -Les filtres de topics, également connus sous le nom de filtres d'arguments indexés, sont une fonctionnalité puissante dans les subgraphs qui permettent aux utilisateurs de filtrer précisément les événements de la blockchain en fonction des valeurs de leurs arguments indexés. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- Ces filtres aident à isoler des événements spécifiques intéressants parmi le vaste flux d'événements sur la blockchain, permettant aux subgraphs de fonctionner plus efficacement en se concentrant uniquement sur les données pertinentes. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- Ceci est utile pour créer des subgraphs personnels qui suivent des adresses spécifiques et leurs interactions avec divers contrats intelligents sur la blockchain. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### Comment fonctionnent les filtres de Topics -Lorsqu'un contrat intelligent émet un événement, tous les arguments marqués comme indexés peuvent être utilisés comme filtres dans le manifeste d'un subgraph. Ceci permet au subgraph d'écouter de façon sélective les événements qui correspondent à ces arguments indexés. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. 
- Le premier argument indexé de l'événement correspond à `topic1`, le second à `topic2`, et ainsi de suite, jusqu'à `topic3`, puisque la machine virtuelle Ethereum (EVM) autorise jusqu'à trois arguments indexés par événement. @@ -401,7 +401,7 @@ Dans cet exemple: #### Configuration dans les subgraphs -Les filtres de topics sont définis directement dans la configuration du gestionnaire d'évènement situé dans le manifeste du subgraph. Voici comment ils sont configurés : +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ Dans cette configuration: - `topic1` est configuré pour filtrer les événements `Transfer` dont l'expéditeur est `0xAddressA`. - `topic2` est configuré pour filtrer les événements `Transfer` dont `0xAddressB` est le destinataire. -- Le subgraph n'indexera que les transactions qui se produisent directement de `0xAddressA` à `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Exemple 2 : Suivi des transactions dans les deux sens entre deux ou plusieurs adresses @@ -452,17 +452,17 @@ Dans cette configuration: - `topic1` est configuré pour filtrer les événements `Transfer` dont l'expéditeur est `0xAddressA`, `0xAddressB`, `0xAddressC`. - `topic2` est configuré pour filtrer les événements `Transfer` où `0xAddressB` et `0xAddressC` sont les destinataires. -- Le subgraph indexera les transactions qui se produisent dans les deux sens entre plusieurs adresses, permettant une surveillance complète des interactions impliquant toutes les adresses. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Déclaration eth_call > Remarque : Il s'agit d'une fonctionnalité expérimentale qui n'est pas encore disponible dans une version stable de Graph Node. Vous ne pouvez l'utiliser que dans Subgraph Studio ou sur votre nœud auto-hébergé. -Les `eth_calls' déclaratifs sont une caractéristique précieuse des subgraphs qui permet aux `eth_calls' d'être exécutés à l'avance, ce qui permet à `graph-node` de les exécuter en parallèle. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. Cette fonctionnalité permet de : -- Améliorer de manière significative les performances de la récupération des données de la blockchain Ethereum en réduisant le temps total pour plusieurs appels et en optimisant l'efficacité globale du subgraph. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Permet une récupération plus rapide des données, entraînant des réponses de requête plus rapides et une meilleure expérience utilisateur. - Réduire les temps d'attente pour les applications qui doivent réunir des données de plusieurs appels Ethereum, rendant le processus de récupération des données plus efficace. @@ -474,7 +474,7 @@ Cette fonctionnalité permet de : #### Scénario sans `eth_calls` déclaratifs -Imaginez que vous ayez un subgraph qui doit effectuer trois appels Ethereum pour récupérer des données sur les transactions, le solde et les avoirs en jetons d'un utilisateur. 
+Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Traditionnellement, ces appels pourraient être effectués de manière séquentielle : @@ -498,15 +498,15 @@ Temps total pris = max (3, 2, 4) = 4 secondes #### Comment ça marche -1. Définition déclarative : Dans le manifeste du subgraph, vous déclarez les appels Ethereum d'une manière indiquant qu'ils peuvent être exécutés en parallèle. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. Moteur d'exécution parallèle : Le moteur d'exécution de Graph Node reconnaît ces déclarations et exécute les appels simultanément. -3. Agrégation des résultats : Une fois que tous les appels sont terminés, les résultats sont réunis et utilisés par le subgraph pour un traitement ultérieur. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Exemple de configuration dans le manifeste du subgraph Les `eth_calls` déclarés peuvent accéder à l'adresse `event.address` de l'événement sous-jacent ainsi qu'à tous les paramètres `event.params`. -`Subgraph.yaml` utilisant `event.address` : +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Détails pour l'exemple ci-dessus : - Le texte (`Pool[event.address].feeGrowthGlobal0X128()`) est le `eth_call` réel qui sera exécuté, et est sous la forme de `Contract[address].function(arguments)` - L'adresse et les arguments peuvent être remplacés par des variables qui seront disponibles lorsque le gestionnaire sera exécuté. -`Subgraph.yaml` utilisant `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** il n'est pas recommandé d'utiliser le greffage lors de l'upgrade initial vers The Graph Network. Pour en savoir plus [ici](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -Lorsqu'un subgraph est déployé pour la première fois, il commence à indexer les événements au bloc de initial de la blockchain correspondante (ou au `startBlock` défini avec chaque source de données). Dans certaines circonstances, il est avantageux de réutiliser les données d'un subgraph existant et de commencer l'indexation à un bloc beaucoup plus tardif. Ce mode d'indexation est appelé _Grafting_. Le greffage (grafting) est, par exemple, utile pendant le développement pour surmonter rapidement de simples erreurs dans les mappages ou pour faire fonctionner temporairement un subgraph existant après qu'il ait échoué. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -Un subgraph est greffé sur un subgraph de base lorsque le manifeste du subgraph dans `subgraph.yaml` contient un bloc `graft` au niveau supérieur : +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... 
# Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -Lorsqu'un subgraph dont le manifeste contient un bloc `graft` est déployé, Graph Node copiera les données du subgraph `de base` jusqu'au bloc spécifié inclus, puis continuera à indexer le nouveau subgraph à partir de ce bloc. Le subgraph de base doit exister sur l'instance cible de Graph Node et doit avoir indexé au moins jusqu'au bloc spécifié. En raison de cette restriction, le greffage ne doit être utilisé que pendant le développement ou en cas d'urgence pour accélérer la production d'un subgraph équivalent non greffé. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. -Étant donné que le greffage copie plutôt que l'indexation des données de base, il est beaucoup plus rapide d'amener le susgraph dans le bloc souhaité que l'indexation à partir de zéro, bien que la copie initiale des données puisse encore prendre plusieurs heures pour de très gros subgraphs. Pendant l'initialisation du subgraph greffé, le nœud graphique enregistrera des informations sur les types d'entités qui ont déjà été copiés. +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -Le subgraph greffé peut utiliser un schema GraphQL qui n'est pas identique à celui du subgraph de base, mais simplement compatible avec lui. Il doit s'agir d'un schema de subgraph valide en tant que tel, mais il peut s'écarter du schema du subgraph de base de la manière suivante : +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - Il ajoute ou supprime des types d'entité - Il supprime les attributs des types d'entité @@ -560,4 +560,4 @@ Le subgraph greffé peut utiliser un schema GraphQL qui n'est pas identique à c - Il ajoute ou supprime des interfaces - Cela change pour quels types d'entités une interface est implémentée -> **[Gestion des fonctionnalités](#experimental-features):** `grafting` doit être déclaré sous `features` dans le manifeste du subgraph. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. 
From bce82ada57760629cf541fbc1338cca32a2a13f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:08 -0500 Subject: [PATCH 0120/1789] New translations advanced.mdx (Spanish) --- .../developing/creating/advanced.mdx | 78 +++++++++---------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/creating/advanced.mdx b/website/src/pages/es/subgraphs/developing/creating/advanced.mdx index 63cf8f312906..212d22aeeade 100644 --- a/website/src/pages/es/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/es/subgraphs/developing/creating/advanced.mdx @@ -4,9 +4,9 @@ title: Advanced Subgraph Features ## Descripción -Add and implement advanced subgraph features to enhanced your subgraph's built. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Timeseries and Aggregations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. +Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## Errores no fatales -Los errores de indexación en subgrafos ya sincronizados provocarán, por defecto, que el subgrafo falle y deje de sincronizarse. 
Los subgrafos pueden ser configurados de manera alternativa para continuar la sincronización en presencia de errores, ignorando los cambios realizados por el handler que provocó el error. Esto da a los autores de los subgrafos tiempo para corregir sus subgrafos mientras las consultas continúan siendo servidas contra el último bloque, aunque los resultados serán posiblemente inconsistentes debido al bug que provocó el error. Nótese que algunos errores siguen siendo siempre fatales, para que el error no sea fatal debe saberse que es deterministico. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -Para activar los errores no fatales es necesario establecer el siguiente indicador en el manifiesto del subgrafo: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## IPFS/Arweave File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > Esto también establece las bases para la indexación determinista de datos off-chain, así como la posible introducción de datos arbitrarios procedentes de HTTP. @@ -290,7 +290,7 @@ Ejemplo: import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. 
The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -317,23 +317,23 @@ This will create a new file data source, which will poll Graph Node's configured This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file ¡Felicitaciones, estás utilizando fuentes de datos de archivos! -#### Deploy de tus subgrafos +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### Limitaciones -Los handlers y entidades de fuentes de datos de archivos están aislados de otras entidades del subgrafo, asegurando que son deterministas cuando se ejecutan, y asegurando que no se contaminan las fuentes de datos basadas en cadenas. En concreto: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - Las entidades creadas por File Data Sources son inmutables y no pueden actualizarse - Los handlers de File Data Source no pueden acceder a entidades de otras fuentes de datos de archivos - Los handlers basados en cadenas no pueden acceder a las entidades asociadas a File Data Sources -> Aunque esta restricción no debería ser problemática para la mayoría de los casos de uso, puede introducir complejidad para algunos. Si tienes problemas para modelar tus datos basados en archivos en un subgrafo, ponte en contacto con nosotros a través de Discord! +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! Además, no es posible crear fuentes de datos a partir de una File Data Source, ya sea una fuente de datos on-chain u otra File Data Source. Es posible que esta restricción se elimine en el futuro. @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. 
-- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### How Topic Filters Work -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. @@ -401,7 +401,7 @@ In this example: #### Configuration in Subgraphs -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses @@ -452,17 +452,17 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Declared eth_call > Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. This feature does the following: -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Allows faster data fetching, resulting in quicker query responses and a better user experience. 
- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. @@ -474,7 +474,7 @@ This feature does the following: #### Scenario without Declarative `eth_calls` -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Traditionally, these calls might be made sequentially: @@ -498,15 +498,15 @@ Total time taken = max (3, 2, 4) = 4 seconds #### How it Works -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Example Configuration in Subgraph Manifest Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Details for the example above: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. 
-A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. -Debido a que el grafting copia en lugar de indexar los datos base, es mucho más rápido llevar el subgrafo al bloque deseado que indexar desde cero, aunque la copia inicial de los datos aún puede llevar varias horas para subgrafos muy grandes. Mientras se inicializa el subgrafo grafted, Graph Node registrará información sobre los tipos de entidad que ya han sido copiados. +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -El subgrafo grafteado puede utilizar un esquema GraphQL que no es idéntico al del subgrafo base, sino simplemente compatible con él. Tiene que ser un esquema de subgrafo válido por sí mismo, pero puede diferir del esquema del subgrafo base de las siguientes maneras: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - Agrega o elimina tipos de entidades - Elimina los atributos de los tipos de entidad @@ -560,4 +560,4 @@ El subgrafo grafteado puede utilizar un esquema GraphQL que no es idéntico al d - Agrega o elimina interfaces - Cambia para qué tipos de entidades se implementa una interfaz -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. 
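To make the declared `eth_call` configuration above more concrete, here is a minimal AssemblyScript sketch of the mapping side. It assumes a `Pool` contract binding and a `Swap` event class produced by `graph codegen` for a data source named `Pool`, plus a hypothetical `PoolStats` entity; none of these names come from the manifest snippets above. The point of the sketch is that the handler code does not change: declaring the call in the manifest only lets Graph Node execute it ahead of time, so the binding call below can be answered from the node's call cache rather than waiting on a fresh RPC round trip.

```typescript
// Assumed codegen output for a data source named "Pool"; adjust the path
// and event name to match your own Subgraph.
import { Swap as SwapEvent, Pool } from '../generated/Pool/Pool'
// Hypothetical entity used only for illustration.
import { PoolStats } from '../generated/schema'

export function handleSwap(event: SwapEvent): void {
  // The same call that was declared in the manifest:
  //   Pool[event.address].feeGrowthGlobal0X128()
  let pool = Pool.bind(event.address)
  let feeGrowth = pool.feeGrowthGlobal0X128()

  let stats = PoolStats.load(event.address.toHexString())
  if (stats == null) {
    stats = new PoolStats(event.address.toHexString())
  }
  stats.feeGrowthGlobal0X128 = feeGrowth
  stats.save()
}
```

The manifest `calls` declaration and this handler refer to the same `eth_call`; only the timing of its execution changes.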
From e7989c0049beb2bb3c85612d6c75647a29ff7dbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:09 -0500 Subject: [PATCH 0121/1789] New translations advanced.mdx (Arabic) --- .../developing/creating/advanced.mdx | 78 +++++++++---------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/creating/advanced.mdx b/website/src/pages/ar/subgraphs/developing/creating/advanced.mdx index d0f9bb2cc348..34a28b58087c 100644 --- a/website/src/pages/ar/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/ar/subgraphs/developing/creating/advanced.mdx @@ -4,9 +4,9 @@ title: Advanced Subgraph Features ## نظره عامة -Add and implement advanced subgraph features to enhanced your subgraph's built. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Timeseries and Aggregations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. +Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## أخطاء غير فادحة -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. 
Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## IPFS/Arweave File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. @@ -290,7 +290,7 @@ Example: import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//This example code is for a Crypto coven Subgraph. 
The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -317,23 +317,23 @@ This will create a new file data source, which will poll Graph Node's configured This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file Congratulations, you are using file data sources! -#### Deploying your subgraphs +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### Limitations -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - Entities created by File Data Sources are immutable, and cannot be updated - File Data Source handlers cannot access entities from other file data sources - Entities associated with File Data Sources cannot be accessed by chain-based handlers -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. 
### How Topic Filters Work -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. @@ -401,7 +401,7 @@ In this example: #### Configuration in Subgraphs -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses @@ -452,17 +452,17 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Declared eth_call > Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. This feature does the following: -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Allows faster data fetching, resulting in quicker query responses and a better user experience. - Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. 
@@ -474,7 +474,7 @@ This feature does the following: #### Scenario without Declarative `eth_calls` -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Traditionally, these calls might be made sequentially: @@ -498,15 +498,15 @@ Total time taken = max (3, 2, 4) = 4 seconds #### How it Works -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Example Configuration in Subgraph Manifest Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Details for the example above: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... 
# Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. -Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - يضيف أو يزيل أنواع الكيانات - يزيل الصفات من أنواع الكيانات @@ -560,4 +560,4 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o - It adds or removes interfaces - يغير للكيانات التي يتم تنفيذ الواجهة لها -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. 
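To complement the topic filter configuration in Example 1, here is a minimal AssemblyScript sketch of the matching handler. It assumes a `Transfer` event class generated by `graph codegen` for a data source named `Token`, ERC-20 style parameter names (`from`, `to`, `value`), and a hypothetical `DirectTransfer` entity. Because Graph Node applies the `topic1`/`topic2` filters before invoking the mapping, the handler only ever receives transfers from `0xAddressA` to `0xAddressB` and needs no address checks of its own.

```typescript
// Assumed codegen output for a data source named "Token"; adjust the path
// and parameter names to match your own contract ABI.
import { Transfer as TransferEvent } from '../generated/Token/Token'
// Hypothetical entity used only for illustration.
import { DirectTransfer } from '../generated/schema'

export function handleDirectTransfer(event: TransferEvent): void {
  // Every event delivered here already matches the topic1/topic2 filters
  // declared in the manifest, so no in-handler address filtering is needed.
  let id = event.transaction.hash.toHexString() + '-' + event.logIndex.toString()
  let transfer = new DirectTransfer(id)
  transfer.from = event.params.from
  transfer.to = event.params.to
  transfer.value = event.params.value
  transfer.blockNumber = event.block.number
  transfer.save()
}
```

The entity and handler names are placeholders; the design point is that address filtering happens in Graph Node before the mapping runs, which keeps the handler small and the indexing workload limited to relevant logs.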
From d701f79a2e610a9179ac42ff5006f153a597213e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:10 -0500 Subject: [PATCH 0122/1789] New translations advanced.mdx (Czech) --- .../developing/creating/advanced.mdx | 80 +++++++++---------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/creating/advanced.mdx b/website/src/pages/cs/subgraphs/developing/creating/advanced.mdx index 4fbf2b573c14..79bc022495f5 100644 --- a/website/src/pages/cs/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/cs/subgraphs/developing/creating/advanced.mdx @@ -4,9 +4,9 @@ title: Advanced Subgraph Features ## Přehled -Add and implement advanced subgraph features to enhanced your subgraph's built. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Timeseries and Aggregations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. +Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## Nefatální -Chyby indexování v již synchronizovaných podgrafech ve výchozím nastavení způsobí selhání podgrafy a zastavení synchronizace. 
Podgrafy lze alternativně nakonfigurovat tak, aby pokračovaly v synchronizaci i při přítomnosti chyb, a to ignorováním změn provedených obslužnou rutinou, která chybu vyvolala. To dává autorům podgrafů čas na opravu jejich podgrafů, zatímco dotazy jsou nadále obsluhovány proti poslednímu bloku, ačkoli výsledky mohou být nekonzistentní kvůli chybě, která chybu způsobila. Všimněte si, že některé chyby jsou stále fatální. Aby chyba nebyla fatální, musí být známo, že je deterministická. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -Povolení nefatálních chyb vyžaduje nastavení následujícího příznaku funkce v manifestu podgraf: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## IPFS/Arweave File Data Sources -Zdroje dat souborů jsou novou funkcí podgrafu pro přístup k datům mimo řetězec během indexování robustním a rozšiřitelným způsobem. Zdroje souborových dat podporují načítání souborů ze systému IPFS a z Arweave. +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > To také vytváří základ pro deterministické indexování dat mimo řetězec a potenciální zavedení libovolných dat ze zdrojů HTTP. 
@@ -246,7 +246,7 @@ The CID of the file as a readable string can be accessed via the `dataSource` as const cid = dataSource.stringParam() ``` -Příklad +Příklad ```typescript import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' @@ -290,7 +290,7 @@ Příklad: import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -317,23 +317,23 @@ Tím se vytvoří nový zdroj dat souborů, který bude dotazovat nakonfigurovan This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file Gratulujeme, používáte souborové zdroje dat! -#### Nasazení podgrafů +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### Omezení -Zpracovatelé a entity zdrojů dat souborů jsou izolovány od ostatních entit podgrafů, což zajišťuje, že jsou při provádění deterministické a nedochází ke kontaminaci zdrojů dat založených na řetězci. Přesněji řečeno: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - Entity vytvořené souborovými zdroji dat jsou neměnné a nelze je aktualizovat - Obsluhy zdrojů dat souborů nemohou přistupovat k entita z jiných zdrojů dat souborů - K entita přidruženým k datovým zdrojům souborů nelze přistupovat pomocí zpracovatelů založených na řetězci -> Ačkoli by toto omezení nemělo být pro většinu případů použití problematické, pro některé může představovat složitost. Pokud máte problémy s modelováním dat založených na souborech v podgrafu, kontaktujte nás prosím prostřednictvím služby Discord! +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! Kromě toho není možné vytvářet zdroje dat ze zdroje dat souborů, ať už se jedná o zdroj dat v řetězci nebo jiný zdroj dat souborů. Toto omezení může být v budoucnu zrušeno. @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. 
-- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### How Topic Filters Work -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. @@ -401,7 +401,7 @@ In this example: #### Configuration in Subgraphs -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses @@ -452,17 +452,17 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Declared eth_call > Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. 
This feature does the following: -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Allows faster data fetching, resulting in quicker query responses and a better user experience. - Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. @@ -474,7 +474,7 @@ This feature does the following: #### Scenario without Declarative `eth_calls` -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Traditionally, these calls might be made sequentially: @@ -498,15 +498,15 @@ Total time taken = max (3, 2, 4) = 4 seconds #### How it Works -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Example Configuration in Subgraph Manifest Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Details for the example above: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. 
+When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. -Protože se při roubování základní data spíše kopírují než indexují, je mnohem rychlejší dostat podgraf do požadovaného bloku než při indexování od nuly, i když počáteční kopírování dat může u velmi velkých podgrafů trvat i několik hodin. Během inicializace roubovaného podgrafu bude uzel Graf Uzel zaznamenávat informace o typů entit, které již byly zkopírovány. +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -Štěpovaný podgraf může používat schéma GraphQL, které není totožné se schématem základního podgrafu, ale je s ním pouze kompatibilní. Musí to být platné schéma podgrafu jako takové, ale může se od schématu základního podgrafu odchýlit následujícími způsoby: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. 
It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - Přidává nebo odebírá typy entit - Odstraňuje atributy z typů entit @@ -560,4 +560,4 @@ Protože se při roubování základní data spíše kopírují než indexují, - Přidává nebo odebírá rozhraní - Mění se, pro které typy entit je rozhraní implementováno -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. From ae6376b94f43fd677b42faef7b34fe06d339a294 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:12 -0500 Subject: [PATCH 0123/1789] New translations advanced.mdx (German) --- .../developing/creating/advanced.mdx | 78 +++++++++---------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/advanced.mdx b/website/src/pages/de/subgraphs/developing/creating/advanced.mdx index 1a8debdf98c5..99a0b7d10a62 100644 --- a/website/src/pages/de/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/advanced.mdx @@ -4,9 +4,9 @@ title: Advanced Subgraph Features ## Überblick -Add and implement advanced subgraph features to enhanced your subgraph's built. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Timeseries and Aggregations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. +Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. 
Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## Non-fatal errors -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## IPFS/Arweave File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. 
+File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. @@ -290,7 +290,7 @@ Example: import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -317,23 +317,23 @@ This will create a new file data source, which will poll Graph Node's configured This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file Congratulations, you are using file data sources! -#### Deploying your subgraphs +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### Limitations -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - Entities created by File Data Sources are immutable, and cannot be updated - File Data Source handlers cannot access entities from other file data sources - Entities associated with File Data Sources cannot be accessed by chain-based handlers -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. 
-- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### How Topic Filters Work -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. @@ -401,7 +401,7 @@ In this example: #### Configuration in Subgraphs -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses @@ -452,17 +452,17 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Declared eth_call > Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. 
This feature does the following: -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Allows faster data fetching, resulting in quicker query responses and a better user experience. - Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. @@ -474,7 +474,7 @@ This feature does the following: #### Scenario without Declarative `eth_calls` -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Traditionally, these calls might be made sequentially: @@ -498,15 +498,15 @@ Total time taken = max (3, 2, 4) = 4 seconds #### How it Works -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Example Configuration in Subgraph Manifest Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Details for the example above: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. 
+When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. -Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. 
It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - It adds or removes entity types - It removes attributes from entity types @@ -560,4 +560,4 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o - It adds or removes interfaces - It changes for which entity types an interface is implemented -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. From 4b208b6b4f50ce169580ff0bdf1f97e498c8f503 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:13 -0500 Subject: [PATCH 0124/1789] New translations advanced.mdx (Italian) --- .../developing/creating/advanced.mdx | 78 +++++++++---------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/creating/advanced.mdx b/website/src/pages/it/subgraphs/developing/creating/advanced.mdx index 94c7d1f0d42d..ded167f1b4d4 100644 --- a/website/src/pages/it/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/it/subgraphs/developing/creating/advanced.mdx @@ -4,9 +4,9 @@ title: Advanced Subgraph Features ## Panoramica -Add and implement advanced subgraph features to enhanced your subgraph's built. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Timeseries and Aggregations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. +Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. 
Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## Errori non fatali -Gli errori di indicizzazione su subgraph già sincronizzati causano, per impostazione predefinita, il fallimento del subgraph e l'interruzione della sincronizzazione. In alternativa, i subgraph possono essere configurati per continuare la sincronizzazione in presenza di errori, ignorando le modifiche apportate dal gestore che ha provocato l'errore. In questo modo gli autori dei subgraph hanno il tempo di correggere i loro subgraph mentre le query continuano a essere servite rispetto al blocco più recente, anche se i risultati potrebbero essere incoerenti a causa del bug che ha causato l'errore. Si noti che alcuni errori sono sempre fatali. Per essere non fatale, l'errore deve essere noto come deterministico. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -Per abilitare gli errori non fatali è necessario impostare il seguente flag di caratteristica nel manifesto del subgraph: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. 
It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## IPFS/Arweave File Data Sources -I data source file sono una nuova funzionalità del subgraph per accedere ai dati fuori chain durante l'indicizzazione in modo robusto ed estendibile. I data source file supportano il recupero di file da IPFS e da Arweave. +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > Questo pone anche le basi per l'indicizzazione deterministica dei dati fuori chain e per la potenziale introduzione di dati arbitrari provenienti da HTTP. @@ -290,7 +290,7 @@ Example: import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -317,23 +317,23 @@ Questo creerà una nuova data source file, che interrogherà l'endpoint IPFS o A This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file Congratulazioni, state usando i data source file! -#### Distribuire i subgraph +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### Limitazioni -I gestori e le entità di data source file sono isolati dalle altre entità del subgraph, assicurando che siano deterministici quando vengono eseguiti e garantendo che non ci sia contaminazione di data source basate sulla chain. Per essere precisi: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - Le entità create di Data Source file sono immutabili e non possono essere aggiornate - I gestori di Data Source file non possono accedere alle entità di altre data source file - Le entità associate al Data Source file non sono accessibili ai gestori alla chain -> Sebbene questo vincolo non dovrebbe essere problematico per la maggior parte dei casi d'uso, potrebbe introdurre complessità per alcuni. Contattate via Discord se avete problemi a modellare i vostri dati basati su file in un subgraph! 
+> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! Inoltre, non è possibile creare data source da una data source file, sia essa una data source onchain o un'altra data source file. Questa restrizione potrebbe essere eliminata in futuro. @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### How Topic Filters Work -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. @@ -401,7 +401,7 @@ In this example: #### Configuration in Subgraphs -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses @@ -452,17 +452,17 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. 
-- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Declared eth_call > Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. This feature does the following: -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Allows faster data fetching, resulting in quicker query responses and a better user experience. - Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. @@ -474,7 +474,7 @@ This feature does the following: #### Scenario without Declarative `eth_calls` -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Traditionally, these calls might be made sequentially: @@ -498,15 +498,15 @@ Total time taken = max (3, 2, 4) = 4 seconds #### How it Works -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Example Configuration in Subgraph Manifest Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Details for the example above: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. 
Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. -Poiché l'innesto copia piuttosto che indicizzare i dati di base, è molto più veloce portare il subgraph al blocco desiderato rispetto all'indicizzazione da zero, anche se la copia iniziale dei dati può richiedere diverse ore per subgraph molto grandi. Mentre il subgraph innestato viene inizializzato, il Graph Node registra le informazioni sui tipi di entità già copiati. +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. 
It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - It adds or removes entity types - It removes attributes from entity types @@ -560,4 +560,4 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o - It adds or removes interfaces - It changes for which entity types an interface is implemented -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. From deda7b9037149d8ee45d9e10adf89805de810e75 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:15 -0500 Subject: [PATCH 0125/1789] New translations advanced.mdx (Japanese) --- .../developing/creating/advanced.mdx | 78 +++++++++---------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/creating/advanced.mdx b/website/src/pages/ja/subgraphs/developing/creating/advanced.mdx index b6269f49fcf5..1d74808c34b1 100644 --- a/website/src/pages/ja/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/ja/subgraphs/developing/creating/advanced.mdx @@ -4,9 +4,9 @@ title: Advanced Subgraph Features ## 概要 -Add and implement advanced subgraph features to enhanced your subgraph's built. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Timeseries and Aggregations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. 
+Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## 致命的でないエラー -すでに同期しているサブグラフのインデックスエラーは、デフォルトではサブグラフを失敗させ、同期を停止させます。サブグラフは、エラーが発生したハンドラーによる変更を無視することで、エラーが発生しても同期を継続するように設定することができます。これにより、サブグラフの作成者はサブグラフを修正する時間を得ることができ、一方でクエリは最新のブロックに対して提供され続けますが、エラーの原因となったバグのために結果が一貫していない可能性があります。なお、エラーの中には常に致命的なものもあり、致命的でないものにするためには、そのエラーが決定論的であることがわかっていなければなりません。 +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -非致命的エラーを有効にするには、サブグラフのマニフェストに以下の機能フラグを設定する必要があります: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## IPFS/Arweave File Data Sources -ファイルデータソースは、堅牢で拡張可能な方法でインデックス作成中にオフチェーンデータにアクセスするための新しいサブグラフ機能です。ファイルデータソースは、IPFS および Arweave からのファイルのフェッチをサポートしています。 +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. 
File data sources support fetching files from IPFS and from Arweave. > また、オフチェーンデータの決定論的なインデックス作成、および任意のHTTPソースデータの導入の可能性についても基礎ができました。 @@ -290,7 +290,7 @@ For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave b import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -317,23 +317,23 @@ export function handleTransfer(event: TransferEvent): void { This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file おめでとうございます!ファイルデータソースが使用できます。 -#### サブグラフのデプロイ +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### 制限事項 -ファイルデータソースハンドラおよびエンティティは、他のサブグラフエンティティから分離され、実行時に決定論的であることを保証し、チェーンベースのデータソースを汚染しないことを保証します。具体的には、以下の通りです。 +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - ファイルデータソースで作成されたエンティティは不変であり、更新することはできません。 - ファイルデータソースハンドラは、他のファイルデータソースのエンティティにアクセスすることはできません。 - ファイルデータソースに関連するエンティティは、チェーンベースハンドラーからアクセスできません。 -> この制約は、ほとんどのユースケースで問題になることはありませんが、一部のユースケースでは複雑さをもたらすかもしれません。ファイルベースのデータをサブグラフでモデル化する際に問題がある場合は、Discordを通じてご連絡ください。 +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! また、オンチェーンデータソースや他のファイルデータソースからデータソースを作成することはできません。この制限は、将来的に解除される可能性があります。 @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. 
+- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### How Topic Filters Work -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. @@ -401,7 +401,7 @@ In this example: #### Configuration in Subgraphs -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses @@ -452,17 +452,17 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Declared eth_call > Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. This feature does the following: -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Allows faster data fetching, resulting in quicker query responses and a better user experience. - Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. 
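On the mapping side, no special syntax is needed to consume a declared call: the handler invokes the generated contract binding as usual, and `graph-node` can answer from the result it already fetched in parallel. The sketch below is illustrative only — the `Pool[event.address].feeGrowthGlobal0X128()` call mirrors the manifest example that follows, while the `Swap` event, the import paths, and the `PoolFeeSnapshot` entity are hypothetical names not taken from this patch.

```typescript
// Illustrative sketch only: `Pool` and `Swap` are assumed to be generated by
// `graph codegen`, and `PoolFeeSnapshot` is a hypothetical schema entity.
import { Pool, Swap } from '../generated/Pool/Pool'
import { PoolFeeSnapshot } from '../generated/schema'

export function handleSwap(event: Swap): void {
  // This looks like an ordinary eth_call; if the manifest declares
  // `Pool[event.address].feeGrowthGlobal0X128()`, graph-node has already
  // executed it in parallel and can serve the cached result here.
  let pool = Pool.bind(event.address)
  let feeGrowth = pool.feeGrowthGlobal0X128()

  let snapshot = new PoolFeeSnapshot(event.transaction.hash.toHex())
  snapshot.feeGrowthGlobal0X128 = feeGrowth
  snapshot.save()
}
```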
@@ -474,7 +474,7 @@ This feature does the following: #### Scenario without Declarative `eth_calls` -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Traditionally, these calls might be made sequentially: @@ -498,15 +498,15 @@ Total time taken = max (3, 2, 4) = 4 seconds #### How it Works -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Example Configuration in Subgraph Manifest Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Details for the example above: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... 
# Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. -グラフトはベースデータのインデックスではなくコピーを行うため、スクラッチからインデックスを作成するよりもサブグラフを目的のブロックに早く到達させることができますが、非常に大きなサブグラフの場合は最初のデータコピーに数時間かかることもあります。グラフトされたサブグラフが初期化されている間、グラフノードは既にコピーされたエンティティタイプに関する情報を記録します。 +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -グラフト化されたサブグラフは、ベースとなるサブグラフのスキーマと同一ではなく、単に互換性のある GraphQL スキーマを使用することができます。また、それ自体は有効なサブグラフのスキーマでなければなりませんが、以下の方法でベースサブグラフのスキーマから逸脱することができます。 +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - エンティティタイプを追加または削除する - エンティティタイプから属性を削除する @@ -560,4 +560,4 @@ When a subgraph whose manifest contains a `graft` block is deployed, Graph Node - インターフェースの追加または削除 - インターフェースがどのエンティティタイプに実装されるかを変更する -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. From e1da2894514e7a5b6ada449368daec09d34aad97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:16 -0500 Subject: [PATCH 0126/1789] New translations advanced.mdx (Korean) --- .../developing/creating/advanced.mdx | 78 +++++++++---------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/creating/advanced.mdx b/website/src/pages/ko/subgraphs/developing/creating/advanced.mdx index ee9918f5f254..d19755ac0876 100644 --- a/website/src/pages/ko/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/ko/subgraphs/developing/creating/advanced.mdx @@ -4,9 +4,9 @@ title: Advanced Subgraph Features ## Overview -Add and implement advanced subgraph features to enhanced your subgraph's built. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. 
-Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Timeseries and Aggregations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. +Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## Non-fatal errors -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. 
This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## IPFS/Arweave File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. @@ -290,7 +290,7 @@ Example: import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -317,23 +317,23 @@ This will create a new file data source, which will poll Graph Node's configured This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file Congratulations, you are using file data sources! 
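The file data source created above is then processed by a separate handler that receives the raw file contents as `Bytes`. The following is only a sketch, assuming a `TokenMetadata` entity with `name` and `image` string fields and a `handleMetadata` handler registered on the `TokenMetadata` template; the exact field names are assumptions for illustration.

```typescript
// Sketch only: assumes the TokenMetadata entity declares `name` and `image`
// string fields and that this handler is wired to the TokenMetadata template.
import { Bytes, dataSource, json } from '@graphprotocol/graph-ts'
import { TokenMetadata } from '../generated/schema'

export function handleMetadata(content: Bytes): void {
  // For file data sources, dataSource.stringParam() returns the CID passed to
  // TokenMetadataTemplate.create(), matching the lookup stored on the Token.
  let tokenMetadata = new TokenMetadata(dataSource.stringParam())

  const value = json.fromBytes(content).toObject()
  if (value) {
    const name = value.get('name')
    const image = value.get('image')
    if (name && image) {
      tokenMetadata.name = name.toString()
      tokenMetadata.image = image.toString()
      tokenMetadata.save()
    }
  }
}
```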
-#### Deploying your subgraphs +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### Limitations -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - Entities created by File Data Sources are immutable, and cannot be updated - File Data Source handlers cannot access entities from other file data sources - Entities associated with File Data Sources cannot be accessed by chain-based handlers -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### How Topic Filters Work -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. 
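The manifest snippets referenced in the hunks below appear only as unchanged context, so most of their lines are elided. As a rough sketch, assuming placeholder addresses, an event handler entry with topic filters in `subgraph.yaml` takes this shape, with each `topicN` key listing the accepted values for the corresponding indexed argument:

```yaml
eventHandlers:
  - event: Transfer(indexed address,indexed address,uint256)
    handler: handleTransfer
    # Keep only Transfer events whose first indexed argument (sender) is 0xAddressA
    # and whose second indexed argument (receiver) is 0xAddressB.
    topic1: ['0xAddressA']
    topic2: ['0xAddressB']
```

Listing several values under one `topicN` key matches any of them, which is how the "either direction" scenario described below is expressed.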
@@ -401,7 +401,7 @@ In this example: #### Configuration in Subgraphs -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses @@ -452,17 +452,17 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Declared eth_call > Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. This feature does the following: -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Allows faster data fetching, resulting in quicker query responses and a better user experience. - Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. @@ -474,7 +474,7 @@ This feature does the following: #### Scenario without Declarative `eth_calls` -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Traditionally, these calls might be made sequentially: @@ -498,15 +498,15 @@ Total time taken = max (3, 2, 4) = 4 seconds #### How it Works -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. 
Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Example Configuration in Subgraph Manifest Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Details for the example above: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. 
The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph.

-Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied.
+Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied.

-The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways:
+The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways:

- It adds or removes entity types
- It removes attributes from entity types
@@ -560,4 +560,4 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o
- It adds or removes interfaces
- It changes for which entity types an interface is implemented

-> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest.
+> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest.

From 98dfaa3bf10c8de13ad9164ced3baa1aeb802cfa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Tue, 25 Feb 2025 17:10:17 -0500
Subject: [PATCH 0127/1789] New translations advanced.mdx (Dutch)

---
 .../developing/creating/advanced.mdx | 78 +++++++++----------
 1 file changed, 39 insertions(+), 39 deletions(-)

diff --git a/website/src/pages/nl/subgraphs/developing/creating/advanced.mdx b/website/src/pages/nl/subgraphs/developing/creating/advanced.mdx
index ee9918f5f254..d19755ac0876 100644
--- a/website/src/pages/nl/subgraphs/developing/creating/advanced.mdx
+++ b/website/src/pages/nl/subgraphs/developing/creating/advanced.mdx
@@ -4,9 +4,9 @@ title: Advanced Subgraph Features

 ## Overview

-Add and implement advanced subgraph features to enhanced your subgraph's built.
+Add and implement advanced Subgraph features to enhance your Subgraph's build.
-Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Timeseries and Aggregations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. +Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## Non-fatal errors -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. 
This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## IPFS/Arweave File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. @@ -290,7 +290,7 @@ Example: import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -317,23 +317,23 @@ This will create a new file data source, which will poll Graph Node's configured This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file Congratulations, you are using file data sources! 
-#### Deploying your subgraphs +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### Limitations -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - Entities created by File Data Sources are immutable, and cannot be updated - File Data Source handlers cannot access entities from other file data sources - Entities associated with File Data Sources cannot be accessed by chain-based handlers -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### How Topic Filters Work -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. 
@@ -401,7 +401,7 @@ In this example: #### Configuration in Subgraphs -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses @@ -452,17 +452,17 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Declared eth_call > Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. This feature does the following: -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Allows faster data fetching, resulting in quicker query responses and a better user experience. - Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. @@ -474,7 +474,7 @@ This feature does the following: #### Scenario without Declarative `eth_calls` -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Traditionally, these calls might be made sequentially: @@ -498,15 +498,15 @@ Total time taken = max (3, 2, 4) = 4 seconds #### How it Works -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. 
Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Example Configuration in Subgraph Manifest Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Details for the example above: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. 
The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph.

-Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied.
+Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied.

-The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways:
+The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways:

- It adds or removes entity types
- It removes attributes from entity types
@@ -560,4 +560,4 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o
- It adds or removes interfaces
- It changes for which entity types an interface is implemented

-> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest.
+> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest.

From 12327bff4248780100af9d503f861195c524bb93 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Tue, 25 Feb 2025 17:10:19 -0500
Subject: [PATCH 0128/1789] New translations advanced.mdx (Polish)

---
 .../developing/creating/advanced.mdx | 78 +++++++++----------
 1 file changed, 39 insertions(+), 39 deletions(-)

diff --git a/website/src/pages/pl/subgraphs/developing/creating/advanced.mdx b/website/src/pages/pl/subgraphs/developing/creating/advanced.mdx
index ee9918f5f254..d19755ac0876 100644
--- a/website/src/pages/pl/subgraphs/developing/creating/advanced.mdx
+++ b/website/src/pages/pl/subgraphs/developing/creating/advanced.mdx
@@ -4,9 +4,9 @@ title: Advanced Subgraph Features

 ## Overview

-Add and implement advanced subgraph features to enhanced your subgraph's built.
+Add and implement advanced Subgraph features to enhance your Subgraph's build.
-Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Timeseries and Aggregations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. +Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## Non-fatal errors -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. 
This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## IPFS/Arweave File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. @@ -290,7 +290,7 @@ Example: import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -317,23 +317,23 @@ This will create a new file data source, which will poll Graph Node's configured This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file Congratulations, you are using file data sources! 
-#### Deploying your subgraphs +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### Limitations -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - Entities created by File Data Sources are immutable, and cannot be updated - File Data Source handlers cannot access entities from other file data sources - Entities associated with File Data Sources cannot be accessed by chain-based handlers -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### How Topic Filters Work -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. 
@@ -401,7 +401,7 @@ In this example: #### Configuration in Subgraphs -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses @@ -452,17 +452,17 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Declared eth_call > Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. This feature does the following: -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Allows faster data fetching, resulting in quicker query responses and a better user experience. - Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. @@ -474,7 +474,7 @@ This feature does the following: #### Scenario without Declarative `eth_calls` -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Traditionally, these calls might be made sequentially: @@ -498,15 +498,15 @@ Total time taken = max (3, 2, 4) = 4 seconds #### How it Works -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. 
Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Example Configuration in Subgraph Manifest Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Details for the example above: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. 
The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph.

-Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied.
+Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied.

-The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways:
+The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways:

- It adds or removes entity types
- It removes attributes from entity types
@@ -560,4 +560,4 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o
- It adds or removes interfaces
- It changes for which entity types an interface is implemented

-> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest.
+> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest.

From b015defeae4c3ed3b30615ba372d25345c943f81 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Tue, 25 Feb 2025 17:10:20 -0500
Subject: [PATCH 0129/1789] New translations advanced.mdx (Portuguese)

---
 .../developing/creating/advanced.mdx | 82 +++++++++----------
 1 file changed, 41 insertions(+), 41 deletions(-)

diff --git a/website/src/pages/pt/subgraphs/developing/creating/advanced.mdx b/website/src/pages/pt/subgraphs/developing/creating/advanced.mdx
index 5dfeb1034a5f..a31f7935fa5f 100644
--- a/website/src/pages/pt/subgraphs/developing/creating/advanced.mdx
+++ b/website/src/pages/pt/subgraphs/developing/creating/advanced.mdx
@@ -4,9 +4,9 @@ title: Advanced Subgraph Features

 ## Visão geral

-Add and implement advanced subgraph features to enhanced your subgraph's built.
+Add and implement advanced Subgraph features to enhance your Subgraph's build.
-Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Séries de Tempo e Agregações @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. +Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Exemplo de Schema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## Erros não-fatais -Erros de indexação em subgraphs já sincronizados, por si próprios, farão que o subgraph falhe e pare de sincronizar. Os subgraphs podem, de outra forma, ser configurados a continuar a sincronizar na presença de erros, ao ignorar as mudanças feitas pelo handler que provocaram o erro. Isto dá tempo aos autores de subgraphs para corrigir seus subgraphs enquanto queries continuam a ser servidos perante o bloco mais recente, porém os resultados podem ser inconsistentes devido ao bug que causou o erro. Note que alguns erros ainda são sempre fatais. Para ser não-fatais, os erros devem ser confirmados como determinísticos. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. 
This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -Permitir erros não fatais exige a configuração da seguinte feature flag no manifest do subgraph: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## Fontes de Dados de Arquivos em IPFS/Arweave -Fontes de dados de arquivos são uma nova funcionalidade de subgraph para acessar dados off-chain de forma robusta e extensível. As fontes de dados de arquivos apoiam o retiro de arquivos do IPFS e do Arweave. +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > Isto também abre as portas para indexar dados off-chain de forma determinística, além de potencialmente introduzir dados arbitrários com fonte em HTTP. @@ -290,7 +290,7 @@ Exemplo: import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//Este exemplo de código é para um subgraph do Crypto Coven. O hash ipfs acima é um diretório com metadados de tokens para todos os NFTs do Crypto Coven. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -300,7 +300,7 @@ export function handleTransfer(event: TransferEvent): void { token.tokenURI = '/' + event.params.tokenId.toString() + '.json' const tokenIpfsHash = ipfshash + token.tokenURI - //Isto cria um caminho aos metadados para um único NFT do Crypto Coven. Ele concatena o diretório com "/" + nome do arquivo + ".json" + //This creates a path to the metadata for a single Crypto coven NFT. 
It concats the directory with "/" + filename + ".json" token.ipfsURI = tokenIpfsHash @@ -317,23 +317,23 @@ Isto criará uma fonte de dados de arquivos, que avaliará o endpoint de IPFS ou This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file Parabéns, você está a usar fontes de dados de arquivos! -#### Como lançar os seus Subgraphs +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### Limitações -Handlers e entidades de fontes de dados de arquivos são isolados de outras entidades de subgraph, o que garante que sejam determinísticos quando executados e que não haja contaminação de fontes de dados baseadas em chain. Especificamente: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - Entidades criadas por Fontes de Dados de Arquivos são imutáveis, e não podem ser atualizadas - Handlers de Fontes de Dados de Arquivos não podem acessar entidades de outras fontes de dados de arquivos - Entidades associadas com Fontes de Dados de Arquivos não podem ser acessadas por handlers baseados em chain -> Enquanto esta limitação pode não ser problemática para a maioria dos casos de uso, ela pode deixar alguns mais complexos. Se houver qualquer problema neste processo, por favor dê um alô via Discord! +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! Além disto, não é possível criar fontes de dados de uma fonte de dado de arquivos, seja uma on-chain ou outra fonte de dados de arquivos. Esta restrição poderá ser retirada no futuro. @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Filtros de tópico, também conhecidos como filtros de argumentos indexados, permitem que os utilizadores filtrem eventos de blockchain com alta precisão, em base nos valores dos seus argumentos indexados. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- Estes filtros ajudam a isolar eventos específicos de interesse do fluxo vasto de eventos na blockchain, o que permite que subgraphs operem com mais eficácia ao focarem apenas em dados relevantes. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- Isto serve para criar subgraphs pessoais que rastreiam endereços específicos e as suas interações com vários contratos inteligentes na blockchain. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. 
### Como Filtros de Tópicos Funcionam -Quando um contrato inteligente emite um evento, quaisquer argumentos que forem marcados como indexados podem ser usados como filtros no manifest de um subgraph. Isto permite que o subgraph preste atenção seletiva para eventos que correspondam a estes argumentos indexados. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. @@ -401,7 +401,7 @@ Neste exemplo: #### Configuração em Subgraphs -Filtros de tópicos são definidos diretamente na configuração de handlers de eventos no manifest do subgraph. Veja como eles são configurados: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ Nesta configuração: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Exemplo 2: Como Rastrear Transações em Qualquer Direção Entre Dois ou Mais Endereços @@ -452,17 +452,17 @@ Nesta configuração: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- O subgraph indexará transações que ocorrerem em qualquer direção entre vários endereços, o que permite a monitoria compreensiva de interações que envolverem todos os endereços. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Declared eth_call > Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. Esta ferramenta faz o seguinte: -- Aumenta muito o desempenho do retiro de dados da blockchain Ethereum ao reduzir o tempo total para múltiplas chamadas e otimizar a eficácia geral do subgraph. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Permite retiros de dados mais rápidos, o que resulta em respostas de query aceleradas e uma experiência de utilizador melhorada. - Reduz tempos de espera para aplicativos que precisam agregar dados de várias chamadas no Ethereum, o que aumenta a eficácia do processo de retiro de dados. 
@@ -474,7 +474,7 @@ Esta ferramenta faz o seguinte: #### Scenario without Declarative `eth_calls` -Imagina que tens um subgraph que precisa fazer três chamadas no Ethereum para retirar dados sobre as transações, o saldo e as posses de token de um utilizador. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Tradicionalmente, estas chamadas podem ser realizadas em sequência: @@ -498,15 +498,15 @@ Total de tempo = max (3, 2, 4) = 4 segundos #### How it Works -1. Definição Declarativa: No manifest do subgraph, as chamadas no Ethereum são declaradas de maneira que indique que elas possam ser executadas em paralelo. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. Motor de Execução Paralela: O motor de execução do Graph Node reconhece estas declarações e executa as chamadas simultaneamente. -3. Agregação de Resultado: Quando todas as chamadas forem completadas, os resultados são agregados e usados pelo subgraph para mais processos. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Example Configuration in Subgraph Manifest Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Detalhes para o exemplo acima: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... 
# ID do subgraph base - block: 7345624 # Número do bloco + base: Qm... # Subgraph ID of base Subgraph + block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. -Como o enxerto copia em vez de indexar dados base, dirigir o subgraph para o bloco desejado desta maneira é mais rápido que indexar do começo, mesmo que a cópia inicial dos dados ainda possa levar várias horas para subgraphs muito grandes. Enquanto o subgraph enxertado é inicializado, o Graph Node gravará informações sobre os tipos de entidade que já foram copiados. +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -O subgraph enxertado pode usar um schema GraphQL que não é idêntico ao schema do subgraph base, mas é apenas compatível com ele. Ele deve ser um schema válido no seu próprio mérito, mas pode desviar do schema do subgraph base nas seguintes maneiras: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - Ele adiciona ou remove tipos de entidade - Ele retira atributos de tipos de entidade @@ -560,4 +560,4 @@ O subgraph enxertado pode usar um schema GraphQL que não é idêntico ao schema - Ele adiciona ou remove interfaces - Ele muda os tipos de entidades para qual implementar uma interface -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. 
From b76eba9a347c03eab5736497747edb4e1687ce0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:21 -0500 Subject: [PATCH 0130/1789] New translations advanced.mdx (Russian) --- .../developing/creating/advanced.mdx | 80 +++++++++---------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/creating/advanced.mdx b/website/src/pages/ru/subgraphs/developing/creating/advanced.mdx index a264671c393e..6ea689767d6d 100644 --- a/website/src/pages/ru/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/ru/subgraphs/developing/creating/advanced.mdx @@ -4,9 +4,9 @@ title: Advanced Subgraph Features ## Обзор -Add and implement advanced subgraph features to enhanced your subgraph's built. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Тайм-серии и агрегации @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. +Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Пример схемы @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## Нефатальные ошибки -Ошибки индексирования в уже синхронизированных субграфах по умолчанию приведут к сбою субграфа и прекращению синхронизации. 
В качестве альтернативы субграфы можно настроить на продолжение синхронизации при наличии ошибок, игнорируя изменения, внесенные обработчиком, который спровоцировал ошибку. Это дает авторам субграфов время на исправление своих субграфов, в то время как запросы к последнему блоку продолжают обрабатываться, хотя результаты могут быть противоречивыми из-за бага, вызвавшего ошибку. Обратите внимание на то, что некоторые ошибки всё равно всегда будут фатальны. Чтобы быть нефатальной, ошибка должна быть детерминированной. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -Для включения нефатальных ошибок необходимо установить в манифесте субграфа следующий флаг функции: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## Источники файловых данных IPFS/Arweave -Источники файловых данных — это новая функциональность субграфа для надежного и расширенного доступа к данным вне чейна во время индексации. Источники данных файлов поддерживают получение файлов из IPFS и Arweave. +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > Это также закладывает основу для детерминированного индексирования данных вне сети, а также потенциального введения произвольных данных из HTTP-источников. 
@@ -290,7 +290,7 @@ For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave b import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//Этот пример кода предназначен для сборщика субграфа Crypto. Приведенный выше хеш ipfs представляет собой каталог с метаданными токена для всех NFT криптоковена. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -300,7 +300,7 @@ export function handleTransfer(event: TransferEvent): void { token.tokenURI = '/' + event.params.tokenId.toString() + '.json' const tokenIpfsHash = ipfshash + token.tokenURI - //Это создает путь к метаданным для одного сборщика NFT Crypto. Он объединяет каталог с "/" + filename + ".json" + //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" token.ipfsURI = tokenIpfsHash @@ -317,23 +317,23 @@ export function handleTransfer(event: TransferEvent): void { This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file Поздравляем, Вы используете файловые источники данных! -#### Развертывание субграфов +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### Ограничения -Обработчики и объекты файловых источников данных изолированы от других объектов субграфа, что гарантирует их детерминированность при выполнении и исключает загрязнение источников данных на чейн-основе. В частности: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - Объекты, созданные с помощью файловых источников данных, неизменяемы и не могут быть обновлены - Обработчики файловых источников данных не могут получить доступ к объектам из других файловых источников данных - Объекты, связанные с источниками данных файлов, не могут быть доступны обработчикам на чейн-основе -> Хотя это ограничение не должно вызывать проблем в большинстве случаев, для некоторых оно может вызвать сложности. Если у Вас возникли проблемы с моделированием Ваших файловых данных в субграфе, свяжитесь с нами через Discord! +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! Кроме того, невозможно создать источники данных из файлового источника данных, будь то источник данных onchain или другой файловый источник данных. Это ограничение может быть снято в будущем. 
@@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Фильтры по темам, также известные как фильтры по индексированным аргументам, — это мощная функция в субграфах, которая позволяет пользователям точно фильтровать события блокчейна на основе значений их индексированных аргументов. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- Эти фильтры помогают изолировать конкретные интересующие события из огромного потока событий в блокчейне, позволяя субграфам работать более эффективно, сосредотачиваясь только на релевантных данных. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- Это полезно для создания персональных субграфов, отслеживающих конкретные адреса и их взаимодействие с различными смарт-контрактами в блокчейне. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### Как работают фильтры тем -Когда смарт-контракт генерирует событие, любые аргументы, помеченные как индексированные, могут использоваться в манифесте субграфа в качестве фильтров. Это позволяет субграфу выборочно прослушивать события, соответствующие этим индексированным аргументам. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. @@ -401,7 +401,7 @@ contract Token { #### Конфигурация в субграфах -Фильтры тем определяются непосредственно в конфигурации обработчика событий в манифесте субграфа. Вот как они настроены: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ eventHandlers: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Пример 2. Отслеживание транзакций в любом направлении между двумя и более адресами @@ -452,17 +452,17 @@ eventHandlers: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- Субграф будет индексировать транзакции, происходящие в любом направлении между несколькими адресами, что позволит осуществлять комплексный мониторинг взаимодействий с участием всех адресов. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. 
## Декларированный eth_call > Примечание: Это экспериментальная функция, которая пока недоступна в стабильной версии Graph Node. Вы можете использовать её только в Subgraph Studio или на своей локальной ноде. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. Эта функция выполняет следующие действия: -- Значительно повышает производительность получения данных из блокчейна Ethereum за счет сокращения общего времени выполнения нескольких вызовов и оптимизации общей эффективности субграфа. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Обеспечивает ускоренное получение данных, что приводит к более быстрому реагированию на запросы и улучшению пользовательского опыта. - Сокращает время ожидания для приложений, которым необходимо агрегировать данные из нескольких вызовов Ethereum, что делает процесс получения данных более эффективным. @@ -474,7 +474,7 @@ Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` #### Scenario without Declarative `eth_calls` -Представьте, что у вас есть субграф, которому необходимо выполнить три вызова в Ethereum, чтобы получить данные о транзакциях пользователя, балансе и владении токенами. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Традиционно эти вызовы могут выполняться последовательно: @@ -498,15 +498,15 @@ Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` #### Как это работает -1. Декларативное определение: В манифесте субграфа Вы декларируете вызовы Ethereum таким образом, чтобы указать, что они могут выполняться параллельно. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. Механизм параллельного выполнения: Механизм выполнения The Graph Node распознает эти объявления и выполняет вызовы одновременно. -3. Агрегация результатов: После завершения всех вызовов результаты агрегируются и используются субграфом для дальнейшей обработки. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Пример конфигурации в манифесте субграфа Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ calls: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
-When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. -Поскольку графтинг копирует, а не индексирует базовые данные, гораздо быстрее перенести субграф в нужный блок, чем индексировать с нуля, хотя для очень больших субграфов копирование исходных данных может занять несколько часов. Пока графтовый субграф инициализируется, узел The Graph будет регистрировать информацию о типах объектов, которые уже были скопированы. +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -Перенесённый субграф может использовать схему GraphQL, которая не идентична схеме базового субграфа, а просто совместима с ней. 
Это должна быть автономно действующая схема субграфа, но она может отличаться от схемы базового субграфа следующим образом: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - Она добавляет или удаляет типы объектов - Она удаляет атрибуты из типов объектов @@ -560,4 +560,4 @@ When a subgraph whose manifest contains a `graft` block is deployed, Graph Node - Она добавляет или удаляет интерфейсы - Она изменяется в зависимости от того, под какой тип объектов реализован интерфейс -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. From 4b414c145bc82dba23814219f8fcc20f59328637 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:22 -0500 Subject: [PATCH 0131/1789] New translations advanced.mdx (Swedish) --- .../developing/creating/advanced.mdx | 80 +++++++++---------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/advanced.mdx b/website/src/pages/sv/subgraphs/developing/creating/advanced.mdx index 7ed946aee07e..68cf6c435536 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/advanced.mdx @@ -4,9 +4,9 @@ title: Advanced Subgraph Features ## Översikt -Add and implement advanced subgraph features to enhanced your subgraph's built. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Timeseries and Aggregations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. 
+Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## Icke dödliga fel -Indexeringsfel på redan synkroniserade delgrafer kommer, som standard, att få delgrafen att misslyckas och sluta synkronisera. Delgrafer kan istället konfigureras för att fortsätta synkroniseringen i närvaro av fel, genom att ignorera ändringarna som orsakades av hanteraren som provocerade felet. Det ger delgrafsförfattare tid att korrigera sina delgrafer medan förfrågningar fortsätter att behandlas mot det senaste blocket, även om resultaten kan vara inkonsekventa på grund av felet som orsakade felet. Observera att vissa fel alltid är dödliga. För att vara icke-dödliga måste felet vara känt för att vara deterministiskt. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -Aktivering av icke-dödliga fel kräver att följande funktionsflagga sätts i delgrafens manifest: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. 
It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## IPFS/Arweave File Data Sources -Filbaserade datakällor är en ny delgrafsfunktion för att få tillgång till data utanför kedjan under indexering på ett robust, utökat sätt. Filbaserade datakällor stödjer hämtning av filer från IPFS och från Arweave. +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > Detta lägger också grunden för deterministisk indexering av data utanför kedjan, samt möjligheten att introducera godtycklig data som hämtas via HTTP. @@ -290,7 +290,7 @@ Exempel: import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//Denna exempelkod är för en undergraf för kryptosamverkan. Ovanstående ipfs-hash är en katalog med tokenmetadata för alla kryptosamverkande NFT:er. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -300,7 +300,7 @@ export function handleTransfer(event: TransferEvent): void { token.tokenURI = '/' + event.params.tokenId.toString() + '.json' const tokenIpfsHash = ipfshash + token.tokenURI - //Detta skapar en sökväg till metadata för en enskild Crypto coven NFT. Den konkaterar katalogen med "/" + filnamn + ".json" + //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" token.ipfsURI = tokenIpfsHash @@ -317,23 +317,23 @@ Detta kommer att skapa en ny filbaserad datakälla som kommer att övervaka Grap This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file Grattis, du använder filbaserade datakällor! -#### Distribuera dina delgrafer +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### Begränsningar -Filbaserade datakällahanterare och entiteter är isolerade från andra delgrafentiteter, vilket säkerställer att de är deterministiska när de körs och att ingen förorening av kedjebaserade datakällor sker. För att vara specifik: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. 
To be specific: - Entiteter skapade av Filbaserade datakällor är oföränderliga och kan inte uppdateras - Filbaserade datakällahanterare kan inte komma åt entiteter från andra filbaserade datakällor - Entiteter associerade med filbaserade datakällor kan inte nås av kedjebaserade hanterare -> Även om denna begränsning inte bör vara problematisk för de flesta användningsfall kan den införa komplexitet för vissa. Var god kontakta oss via Discord om du har problem med att modellera din data baserad på fil i en delgraf! +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! Dessutom är det inte möjligt att skapa datakällor från en filbaserad datakälla, vare sig det är en datakälla på kedjan eller en annan filbaserad datakälla. Denna begränsning kan komma att hävas i framtiden. @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### How Topic Filters Work -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. @@ -401,7 +401,7 @@ In this example: #### Configuration in Subgraphs -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. 
+- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses @@ -452,17 +452,17 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Declared eth_call > Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. This feature does the following: -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Allows faster data fetching, resulting in quicker query responses and a better user experience. - Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. @@ -474,7 +474,7 @@ This feature does the following: #### Scenario without Declarative `eth_calls` -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Traditionally, these calls might be made sequentially: @@ -498,15 +498,15 @@ Total time taken = max (3, 2, 4) = 4 seconds #### How it Works -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Example Configuration in Subgraph Manifest Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. 
-`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Details for the example above: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. -Eftersom ympning kopierar data istället för att indexera basdata går det mycket snabbare att få delgrafen till det önskade blocket än att indexera från början, även om den initiala datorkopieringen fortfarande kan ta flera timmar för mycket stora delgrafer. 
Medan den ympade delgrafen initialiseras kommer Graph Node att logga information om de entitetstyper som redan har kopierats. +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -Den ympade subgrafen kan använda ett GraphQL-schema som inte är identiskt med det i bas subgrafen, utan bara är kompatibelt med det. Det måste vara ett giltigt subgraf schema i sig, men kan avvika från bas undergrafens schema på följande sätt: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - Den lägger till eller tar bort entitetstyper - Det tar bort attribut från entitetstyper @@ -560,4 +560,4 @@ Den ympade subgrafen kan använda ett GraphQL-schema som inte är identiskt med - Den lägger till eller tar bort gränssnitt - Det ändrar för vilka entitetstyper ett gränssnitt implementeras -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. From 5d88793994e160189b7c26fafa5618794bd62cc5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:23 -0500 Subject: [PATCH 0132/1789] New translations advanced.mdx (Turkish) --- .../developing/creating/advanced.mdx | 80 +++++++++---------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/creating/advanced.mdx b/website/src/pages/tr/subgraphs/developing/creating/advanced.mdx index 980b0069c3e9..b60e8af06703 100644 --- a/website/src/pages/tr/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/tr/subgraphs/developing/creating/advanced.mdx @@ -4,9 +4,9 @@ title: Advanced Subgraph Features ## Genel Bakış -Add and implement advanced subgraph features to enhanced your subgraph's built. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... 
``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Zaman Serileri ve Toplulaştırmalar @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. +Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Örnek Şema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## Ölümcül Olmayan Hatalar -Halihazırda senkronize edilmiş subgraphlarda indeksleme hataları varsayılan olarak subgraph başarısız olmasına ve senkronizasyonun durmasına neden olur. Hatalara rağmen senkronizasyonun devam etmesi için subgraphlar, hata tetikleyen işleyicinin yapılan değişikliklerini yok sayarak yapılandırılabilir. Bu, subgraph yazarlarının subgraphlarını düzeltmeleri için zaman kazandırırken, sorguların en son blokta sunulmaya devam etmesini sağlar, ancak hata nedeniyle sonuçlar tutarsız olabilir. Bazı hatalar hala her zaman ölümcül olacaktır. Ölümcül olmaması için hatanın belirlenmiş olması gerekmektedir. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -Ölümcül olmayan hataların etkinleştirilmesi, subgraph manifestinde aşağıdaki özellik bayrağının ayarlanmasını gerektirir: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. 
It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## IPFS/Arweave Dosya Veri Kaynakları -Dosya veri kaynakları, indeksleme sırasında zincir dışı verilere sağlam ve genişletilebilir bir şekilde erişmek için yeni bir subgraph fonksiyonudur. Dosya veri kaynakları IPFS'den ve Arweave'den dosya getirmeyi desteklemektedir. +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > Bu aynı zamanda zincir dışı verilerinin belirlenebilir indekslenmesi için zemin hazırlar ve keyfi HTTP kaynaklı verilerin tanıtılma potansiyelini de beraberinde getirir. @@ -290,7 +290,7 @@ For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave b import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//Bu örnek kod, bir Crypto coven subgraph'ı içindir. Yukarıdaki ipfs hash'ı, tüm kripto NFT'leri için token üst verilerine sahip bir dizindir. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -300,7 +300,7 @@ export function handleTransfer(event: TransferEvent): void { token.tokenURI = '/' + event.params.tokenId.toString() + '.json' const tokenIpfsHash = ipfshash + token.tokenURI - //Bu, tek bir Crypto coven NFT için üst verilere giden bir yol oluşturur. Dizini "/" + dosya adı + ".json" ile birleştirir. + //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" token.ipfsURI = tokenIpfsHash @@ -317,23 +317,23 @@ Bu, Graph Düğümü'nün yapılandırılmış IPFS veya Arweave uç noktasını This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file Tebrikler, dosya veri kaynaklarını kullanıyorsunuz! -#### Subgraph'ınızı dağıtma +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### Sınırlamalar -Dosya veri kaynağı işleyicileri ve varlıkları yürütüldüklerinde belirleyici olmaları ve zincir tabanlı veri kaynaklarının bozulmasını önlemeleri için, diğer subgraph varlıklarından izole edilir,. 
Açıkça şunlardır: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - Dosya Veri Kaynakları tarafından oluşturulan varlıklar değiştirilemez ve güncellenemez - Dosya Veri Kaynağı işleyicileri, diğer dosya veri kaynaklarından varlıklara erişemez - Dosya Veri Kaynaklarıyla ilişkili varlıklara zincir tabanlı işleyicilerden erişilemez -> Bu kısıtlama çoğu kullanım durumu için sorun oluşturmamalıdır, ancak bazı durumlarda karmaşıklıklığa sebep olabilir. Dosya tabanlı verilerinizi bir subgraph'ta modellemekte zorluk yaşarsanız, lütfen Discord üzerinden bizimle iletişime geçin! +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! Ek olarak, zincir üstü bir veri kaynağı veya başka bir dosya veri kaynağı olsun, bir dosya veri kaynağından veri kaynakları oluşturmak mümkün değildir. Bu kısıtlama gelecekte kaldırılabilir. @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Konu filtreleri veya endekslenmiş argüman filtreleri olarak da bilinen bu özellik, subgraph'lerin endekslenmiş argümanlarının değerlerine göre blok zinciri olaylarını hassas bir şekilde filtrelemesine olanak tanır. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- Bu filtreler, blokzincirindeki büyük olay akışından ilgilenilen belirli olayları izole etmeye yardımcı olarak, subgraph'lerin yalnızca alakalı verilere odaklanmasını ve böylece daha verimli çalışmasını sağlar. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- Bu özellik, belirli adresleri ve bunların blokzincirindeki çeşitli akıllı sözleşmelerle olan etkileşimlerini izleyen kişisel subgraph'ler oluşturmak için faydalıdır. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### Konu Filtreleri Nasıl Çalışır -Bir akıllı sözleşme olay yaydığında, endekslenmiş olarak işaretlenen tüm argümanlar bir subgraph'in manifestosunda filtre olarak kullanılabilir. Bu durum, subgraph'in yalnızca ilgili endekslenmiş argümanlara uyan olayları dinleyip diğerlerini görmezden gelmesini sağlar. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. @@ -401,7 +401,7 @@ Bu örnekte: #### Subgraph'lerde Yapılandırma -Konu filtreleri, subgraph manifestosunda doğrudan olay işleyici yapılandırması içinde tanımlanır. Yapılandırma örneği: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. 
Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ Bu konfigürasyonda: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Örnek 2: İki veya Daha Fazla Adres Arasında Her İki Yönde Gerçekleşen İşlemleri Takip Etme @@ -452,17 +452,17 @@ Bu konfigürasyonda: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- Subgraph, birden fazla adres arasında her iki yönde gerçekleşen işlemleri endeksleyerek tüm adresleri içeren etkileşimlerin kapsamlı bir şekilde izlenmesini sağlar. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Deklare edilmiş eth_call > Not: Bu, henüz stabil bir Graph Düğümü sürümünde mevcut olmayan deneysel bir özelliktir. Yalnızca Subgraph Studio'da veya sağlayıcılığını kendiniz yaptığınız düğümünüzde kullanabilirsiniz. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. Bu özellik: -- Ethereum blokzincirinden veri getirme performansını önemli ölçüde artırır. Bunu birden fazla çağrı için toplam süreyi azaltarak ve subgraph'in genel verimliliğini optimize ederek yapar. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Daha hızlı veri çekmeye olanak tanıyarak, daha hızlı sorgu yanıtları alınmasını ve daha iyi bir kullanıcı deneyimi sağlar. - Birden fazla Ethereum çağrısından veri toplaması gereken uygulamalar için bekleme sürelerini azaltarak veri çekme sürecini daha verimli hale getirir. @@ -474,7 +474,7 @@ Bu özellik: #### Scenario without Declarative `eth_calls` -Bir kullanıcının işlemleri, bakiyesi ve token varlıkları hakkında veri almak için üç Ethereum çağrısı yapması gereken bir subgraph'iniz olduğunu düşünün. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Geleneksel olarak, bu çağrılar ardışık olarak yapılabilir: @@ -498,15 +498,15 @@ Toplam süre = max (3, 2, 4) = 4 saniye #### Nasıl Çalışır -1. Bildirimsel Tanım: Subgraph manifestosunda, Ethereum çağrılarını paralel olarak çalıştırılabilecek şekilde tanımlarsınız. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. Paralel Çalıştırma Motoru: Graph Düğümü'nün yürütme motoru bu bildirimleri tanır ve çağrıları aynı anda çalıştırır. -3. Sonuçların Birleştirilmesi: Tüm çağrılar tamamlandığında, sonuçlar birleştirilir ve sonraki işlemler için subgraph tarafından kullanılır. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. 
#### Subgraph Manifestosunda Örnek Yapılandırma Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Yukarıdaki örnek için detaylar: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. 
-Graftlama, temel verileri indekslemek yerine kopyaladığından, subgraph'ı istenen bloğa getirmek sıfırdan indekslemeye nazaran çok daha hızlıdır, ancak ilk veri kopyası çok büyük subgraphlar için yine birkaç saat sürebilir. Graftlanmış subgraph başlatılırken, Graph Düğümü halihazırda kopyalanmış olan varlık türleri hakkında bilgileri kaydedecektir. +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -Graftlanan subgraph, temel subgraphla tamamen aynı olmayan, ancak onunla uyumlu olan bir GraphQL şeması kullanabilir. Kendi başına geçerli bir subgraph şeması olmalıdır, ancak şu şekillerde temel subgraph şemasından sapabilir: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - Varlık türlerini ekler veya kaldırır - Varlık türlerinden öznitelikleri kaldırır @@ -560,4 +560,4 @@ Graftlanan subgraph, temel subgraphla tamamen aynı olmayan, ancak onunla uyumlu - Arayüzleri ekler veya kaldırır - Arayüzün hangi varlık türleri için uygulandığını değiştirir -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. From c28c97d20a07a0321496920bb5798805f0fac9f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:24 -0500 Subject: [PATCH 0133/1789] New translations advanced.mdx (Ukrainian) --- .../developing/creating/advanced.mdx | 78 +++++++++---------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/creating/advanced.mdx b/website/src/pages/uk/subgraphs/developing/creating/advanced.mdx index 7614511a5617..d65a9c6b37d1 100644 --- a/website/src/pages/uk/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/uk/subgraphs/developing/creating/advanced.mdx @@ -4,9 +4,9 @@ title: Advanced Subgraph Features ## Overview -Add and implement advanced subgraph features to enhanced your subgraph's built. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. 
-Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Timeseries and Aggregations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. +Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## Non-fatal errors -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. 
This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## IPFS/Arweave File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. @@ -290,7 +290,7 @@ Example: import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -317,23 +317,23 @@ This will create a new file data source, which will poll Graph Node's configured This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file Congratulations, you are using file data sources! 
-#### Deploying your subgraphs +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### Limitations -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - Entities created by File Data Sources are immutable, and cannot be updated - File Data Source handlers cannot access entities from other file data sources - Entities associated with File Data Sources cannot be accessed by chain-based handlers -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### How Topic Filters Work -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. 
@@ -401,7 +401,7 @@ In this example: #### Configuration in Subgraphs -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses @@ -452,17 +452,17 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Declared eth_call > Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. This feature does the following: -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Allows faster data fetching, resulting in quicker query responses and a better user experience. - Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. @@ -474,7 +474,7 @@ This feature does the following: #### Scenario without Declarative `eth_calls` -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Traditionally, these calls might be made sequentially: @@ -498,15 +498,15 @@ Total time taken = max (3, 2, 4) = 4 seconds #### How it Works -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. 
Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Example Configuration in Subgraph Manifest Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Details for the example above: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. 
The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. -Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -Підграф, утворений в результаті може використовувати схему GraphQL, яка не є ідентичною схемі базового підграфа, а лише сумісною з нею. Вона повинна бути валідною схемою підграфа сама по собі, але може відхилятися від схеми базового підграфа у такому випадку: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - Додає або видаляє типи елементів - Видаляє атрибути з типів елементів @@ -560,4 +560,4 @@ Because grafting copies rather than indexes base data, it is much quicker to get - Додає або видаляє інтерфейси - Визначає, для яких типів елементів реалізовано інтерфейс -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. From 04e56f43c7d4f6f7fbab0a52a05fdb045ca2d9f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:25 -0500 Subject: [PATCH 0134/1789] New translations advanced.mdx (Chinese Simplified) --- .../developing/creating/advanced.mdx | 104 +++++++++--------- 1 file changed, 52 insertions(+), 52 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/creating/advanced.mdx b/website/src/pages/zh/subgraphs/developing/creating/advanced.mdx index 5f86707e106c..dd770836b1e8 100644 --- a/website/src/pages/zh/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/zh/subgraphs/developing/creating/advanced.mdx @@ -2,11 +2,11 @@ title: Advanced Subgraph Features --- -## 概述 +## Overview -Add and implement advanced subgraph features to enhanced your subgraph's built. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. 
-Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Timeseries and Aggregations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. +Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## 非致命错误 -在默认情况下,已同步子图上的索引错误会导致子图失败并停止同步。 子图也可以配置为忽略引发错误的处理程序所做的更改, 在出现错误时继续同步。 这使子图作者有时间更正他们的子图,同时继续针对最新区块提供查询,尽管由于导致错误的代码问题,结果可能会不一致。 请注意,某些错误仍然总是致命的,要成为非致命错误,首先需要确定相应的错误是确定性的错误。 +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. 
+> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -启用非致命错误需要在子图清单上设置以下功能标志: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,11 +145,11 @@ If the subgraph encounters an error, that query will return both the data and a ## IPFS/Arweave File Data Sources -文件数据源是一种新的子图功能,用于以稳健、可扩展的方式在索引期间访问链下数据。文件数据源支持从IPFS和Arweave获取文件。 +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > 这也为链外数据的确定性索引以及引入任意HTTP源数据奠定了基础。 -### 概述 +### Overview Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. @@ -290,7 +290,7 @@ For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave b import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -317,23 +317,23 @@ export function handleTransfer(event: TransferEvent): void { This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file 祝贺您,您正在使用文件数据源! -#### 将你的子图部署 +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### 限制 -文件数据源处理程序和实体与其他子图实体隔离,确保它们在执行时是确定的,并确保基于链的数据源不受污染。具体来说: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. 
To be specific: - 文件数据源创建的实体是不可变的,不能更新 - 文件数据源处理程序无法访问其他文件数据源中的实体 - 基于链的处理程序无法访问与文件数据源关联的实体 -> 虽然这个约束对于大多数用例不应该是有问题的,但是对于某些用例,它可能会引入复杂性。如果您在子图中基于文件数据建模时遇到问题,请通过 Discord 与我们联系! +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! 此外,不可能从文件数据源创建数据源,无论是线上数据源还是其他文件数据源。这项限制将来可能会取消。 @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### How Topic Filters Work -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. @@ -401,7 +401,7 @@ In this example: #### Configuration in Subgraphs -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses @@ -452,17 +452,17 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. 
-- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Declared eth_call > Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. This feature does the following: -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Allows faster data fetching, resulting in quicker query responses and a better user experience. - Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. @@ -474,13 +474,13 @@ This feature does the following: #### Scenario without Declarative `eth_calls` -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Traditionally, these calls might be made sequentially: -1. Call 1 (Transactions): Takes 3 seconds +1. 调用 1 (交易): 需要3 秒 2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds +3. 调用 3 (代币持有): 需要4 秒 Total time taken = 3 + 2 + 4 = 9 seconds @@ -488,9 +488,9 @@ Total time taken = 3 + 2 + 4 = 9 seconds With this feature, you can declare these calls to be executed in parallel: -1. Call 1 (Transactions): Takes 3 seconds +1. 调用 1 (交易): 需要3 秒 2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds +3. 调用 3 (代币持有): 需要4 秒 Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. @@ -498,15 +498,15 @@ Total time taken = max (3, 2, 4) = 4 seconds #### How it Works -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Example Configuration in Subgraph Manifest Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. 
-`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Details for the example above: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,29 +535,29 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. -因为嫁接是拷贝而不是索引基础数据,所以子图同步到所需区块比从头开始索引要快得多,尽管对于非常大的子图,初始数据拷贝仍可能需要几个小时。 在初始化嫁接子图时,Graph 节点将记录有关已复制的实体类型的信息。 +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. 
While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -嫁接子图可以使用一个 GraphQL 模式 schema,该模式与基子图之一不同,但仅与基子图兼容。它本身必须是一个有效的子图模式,但是可以通过以下方式偏离基子图的模式: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - 它添加或删除实体类型 -- 它从实体类型中删除属性 -- 它将可为空的属性添加到实体类型 -- 它将不可为空的属性转换为可空的属性 -- 它将值添加到枚举类型中 -- 它添加或删除接口 -- 它改变了实现接口的实体类型 - -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +- 从实体类型中删除属性 +- 将可为空的属性添加到实体类型 +- 将不可为空的属性转换为可空的属性 +- 将值添加到枚举类型中 +- 添加或删除接口 +- 改变了实现接口的实体类型 + +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. From 5ec9347111283487321f8ccf159d869a3b9bd78b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:27 -0500 Subject: [PATCH 0135/1789] New translations advanced.mdx (Urdu (Pakistan)) --- .../developing/creating/advanced.mdx | 78 +++++++++---------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/creating/advanced.mdx b/website/src/pages/ur/subgraphs/developing/creating/advanced.mdx index 6d3c40d1e663..84e5a5f0f689 100644 --- a/website/src/pages/ur/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/ur/subgraphs/developing/creating/advanced.mdx @@ -4,9 +4,9 @@ title: Advanced Subgraph Features ## جائزہ -Add and implement advanced subgraph features to enhanced your subgraph's built. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Timeseries and Aggregations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. 
+Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## Non-fatal errors -پہلے سے مطابقت پذیر سب گرافس پر انڈیکسنگ کی غلطیاں، بذریعہ ڈیفالٹ، سب گراف کے ناکام ہونے اور مطابقت پذیری کو روکنے کا سبب بنیں گی. سب گراف کو متبادل طور پر غلطیوں کی موجودگی میں مطابقت پذیری جاری رکھنے کے لیے ترتیب دیا جا سکتا ہے، ہینڈلر کی طرف سے کی گئی تبدیلیوں کو نظر انداز کر کے جس سے خرابی پیدا ہوئی. اس سے سب گراف مصنفین کو اپنے سب گراف کو درست کرنے کا وقت ملتا ہے جب کہ تازہ ترین بلاک کے خلاف کیوریز پیش کی جاتی رہتی ہیں، حالانکہ اس خرابی کی وجہ سے نتائج متضاد ہو سکتے ہیں. نوٹ کریں کہ کچھ غلطیاں اب بھی ہمیشہ مہلک ہوتی ہیں. غیر مہلک ہونے کے لیے، خرابی کو تعییناتی معلوم ہونا چاہیے. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -غیر مہلک غلطیوں کو فعال کرنے کے لیے سب گراف مینی فیسٹ پر درج ذیل خصوصیت کا فلیگ ترتیب دینے کی ضرورت ہے: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. 
It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## IPFS/Arweave File Data Sources -فائل ڈیٹا کے ذرائع ایک مضبوط، قابل توسیع طریقے سے انڈیکسنگ کے دوران آف چین ڈیٹا تک رسائی کے لیے ایک نئی سب گراف کی فعالیت ہے۔ فائل ڈیٹا کے ذرائع IPFS اور Arweave سے فائلیں لانے میں معاونت کرتے ہیں. +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > یہ آف چین ڈیٹا کی تعییناتی انڈیکسنگ کے ساتھ ساتھ صوابدیدی HTTP سے حاصل کردہ ڈیٹا کے ممکنہ تعارف کی بنیاد بھی رکھتا ہے. @@ -290,7 +290,7 @@ For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave b import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -317,23 +317,23 @@ export function handleTransfer(event: TransferEvent): void { This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file مبارک ہو، آپ فائل ڈیٹا سورسز استعمال کر رہے ہیں! -#### آپ کے سب گراف کو تعینات کرنا +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### حدود -فائل ڈیٹا سورس کے ہینڈلرز اور ہستیوں کو دیگر سب گراف ہستیوں سے الگ تھلگ کر دیا جاتا ہے، اس بات کو یقینی بناتے ہوئے کہ عمل درآمد کے وقت وہ تعیین پسند ہیں، اور اس بات کو یقینی بناتے ہیں کہ چین پر مبنی ڈیٹا سورسز کی کوئی آلودگی نہ ہو۔ مخصوص ہونا: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - فائل ڈیٹا سورسز کے ذریعے تخلیق کردہ ادارے ناقابل تغیر ہیں، اور انہیں اپ ڈیٹ نہیں کیا جا سکتا - فائل ڈیٹا کے ذرائع ہینڈلرز دوسرے فائل ڈیٹا سورسز سے اداروں تک رسائی حاصل نہیں کرسکتے ہیں - فائل ڈیٹا کے ذرائع سے وابستہ ہستیوں تک چین پر مبنی ہینڈلرز تک رسائی حاصل نہیں کی جا سکتی ہے -> اگرچہ یہ رکاوٹ زیادہ تر استعمال کے معاملات کے لیے مشکل نہیں ہونی چاہیے، لیکن یہ کچھ لوگوں کے لیے پیچیدگی پیدا کر سکتی ہے۔ براہ کرم ڈسکورڈ کے ذریعے رابطہ کریں اگر آپ کو اپنے فائل پر مبنی ڈیٹا کو سب گراف میں ماڈل کرنے میں مسئلہ درپیش ہے! 
+> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! مزید برآں، فائل ڈیٹا سورس سے ڈیٹا سورسز بنانا ممکن نہیں ہے، چاہے وہ آن چین ڈیٹا سورس ہو یا کوئی اور فائل ڈیٹا سورس۔ مستقبل میں یہ پابندی ختم ہو سکتی ہے. @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### How Topic Filters Work -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. @@ -401,7 +401,7 @@ In this example: #### Configuration in Subgraphs -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses @@ -452,17 +452,17 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. 
-- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Declared eth_call > Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. This feature does the following: -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Allows faster data fetching, resulting in quicker query responses and a better user experience. - Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. @@ -474,7 +474,7 @@ This feature does the following: #### Scenario without Declarative `eth_calls` -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Traditionally, these calls might be made sequentially: @@ -498,15 +498,15 @@ Total time taken = max (3, 2, 4) = 4 seconds #### How it Works -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Example Configuration in Subgraph Manifest Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Details for the example above: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. 
Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. -چونکہ گرافٹنگ بیس ڈیٹا کو انڈیکس کرنے کے بجائے کاپی کرتا ہے، شروع سے انڈیکس کرنے کے مقابلے میں مطلوبہ بلاک میں سب گراف حاصل کرنا بہت تیز ہے، حالانکہ ابتدائی ڈیٹا کاپی بہت بڑے سب گراف کے لیے کئی گھنٹے لگ سکتی ہے۔ جب گرافٹ شدہ سب گراف کو شروع کیا جا رہا ہے، گراف نوڈ ان ہستی کی اقسام کے بارے میں معلومات کو لاگ کرے گا جو پہلے ہی کاپی ہو چکی ہیں. +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. 
-گرافٹڈ سب گراف ایک گراف کیو ایل اسکیما استعمال کرسکتا ہے جو بیس سب گراف میں سے ایک سے مماثل نہیں ہے، لیکن اس کے ساتھ محض مطابقت رکھتا ہے۔ اسے اپنے طور پر ایک درست سب گراف سکیما ہونا چاہیے، لیکن درج ذیل طریقوں سے بنیادی سب گراف کے سکیما سے انحراف ہو سکتا ہے: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - یہ ہستی کی اقسام کو جوڑتا یا ہٹاتا ہے - یہ ہستی کی اقسام سے صفات کو ہٹاتا ہے @@ -560,4 +560,4 @@ When a subgraph whose manifest contains a `graft` block is deployed, Graph Node - یہ انٹرفیس کو جوڑتا یا ہٹاتا ہے - یہ تبدیل ہوتا ہے جس کے لیے ایک انٹرفیس لاگو کیا جاتا ہے -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. From 644655de3689edbfbc1779e84e01c2553ddf54df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:28 -0500 Subject: [PATCH 0136/1789] New translations advanced.mdx (Vietnamese) --- .../developing/creating/advanced.mdx | 78 +++++++++---------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/creating/advanced.mdx b/website/src/pages/vi/subgraphs/developing/creating/advanced.mdx index 82d7dd120a70..c275fd53fcab 100644 --- a/website/src/pages/vi/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/vi/subgraphs/developing/creating/advanced.mdx @@ -4,9 +4,9 @@ title: Advanced Subgraph Features ## Tổng quan -Add and implement advanced subgraph features to enhanced your subgraph's built. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Timeseries and Aggregations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. 
-Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. +Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## Lỗi không nghiêm trọng -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. 
It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## IPFS/Arweave File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. @@ -290,7 +290,7 @@ Example: import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -317,23 +317,23 @@ This will create a new file data source, which will poll Graph Node's configured This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file Congratulations, you are using file data sources! -#### Deploying your subgraphs +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### Limitations -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - Entities created by File Data Sources are immutable, and cannot be updated - File Data Source handlers cannot access entities from other file data sources - Entities associated with File Data Sources cannot be accessed by chain-based handlers -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. 
Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### How Topic Filters Work -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. @@ -401,7 +401,7 @@ In this example: #### Configuration in Subgraphs -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses @@ -452,17 +452,17 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. 
+- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Declared eth_call > Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. This feature does the following: -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Allows faster data fetching, resulting in quicker query responses and a better user experience. - Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. @@ -474,7 +474,7 @@ This feature does the following: #### Scenario without Declarative `eth_calls` -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Traditionally, these calls might be made sequentially: @@ -498,15 +498,15 @@ Total time taken = max (3, 2, 4) = 4 seconds #### How it Works -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Example Configuration in Subgraph Manifest Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Details for the example above: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
-When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. -Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. 
It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - It adds or removes entity types - It removes attributes from entity types @@ -560,4 +560,4 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o - It adds or removes interfaces - Nó thay đổi đối với loại thực thể nào mà một giao diện được triển khai -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. From 108ffd8791036d8cf51d8abeaa38c6153c271129 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:29 -0500 Subject: [PATCH 0137/1789] New translations advanced.mdx (Marathi) --- .../developing/creating/advanced.mdx | 78 +++++++++---------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/creating/advanced.mdx b/website/src/pages/mr/subgraphs/developing/creating/advanced.mdx index c24f72030078..52fa9f987217 100644 --- a/website/src/pages/mr/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/mr/subgraphs/developing/creating/advanced.mdx @@ -4,9 +4,9 @@ title: Advanced Subgraph Features ## सविश्लेषण -Add and implement advanced subgraph features to enhanced your subgraph's built. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Timeseries and Aggregations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. 
+Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## गैर-घातक त्रुटी -आधीच समक्रमित केलेल्या सबग्राफ्सवर अनुक्रमणिका त्रुटी, डीफॉल्टनुसार, सबग्राफ अयशस्वी होण्यास आणि समक्रमण थांबवण्यास कारणीभूत ठरतील. सबग्राफ वैकल्पिकरित्या त्रुटींच्या उपस्थितीत समक्रमण सुरू ठेवण्यासाठी कॉन्फिगर केले जाऊ शकतात, हँडलरने केलेल्या बदलांकडे दुर्लक्ष करून, ज्यामुळे त्रुटी उद्भवली. हे सबग्राफ लेखकांना त्यांचे सबग्राफ दुरुस्त करण्यासाठी वेळ देते जेव्हा की नवीनतम ब्लॉकच्या विरूद्ध क्वेरी चालू ठेवल्या जातात, जरी त्रुटीमुळे परिणाम विसंगत असू शकतात. लक्षात घ्या की काही त्रुटी अजूनही नेहमीच घातक असतात. गैर-घातक होण्यासाठी, त्रुटी निश्चितपणे ज्ञात असणे आवश्यक आहे. +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. 
It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ If the subgraph encounters an error, that query will return both the data and a ## IPFS/Arweave File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > हे ऑफ-चेन डेटाच्या निर्धारवादी अनुक्रमणिकेसाठी तसेच अनियंत्रित HTTP-स्रोत डेटाच्या संभाव्य परिचयासाठी देखील पाया घालते. @@ -290,7 +290,7 @@ For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave b import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -317,23 +317,23 @@ This will create a new file data source, which will poll Graph Node's configured This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file Congratulations, you are using file data sources! -#### तुमचे सबग्राफ उपयोजित करत आहे +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### Limitations -फाइल डेटा स्रोत हँडलर आणि संस्था इतर सबग्राफ संस्थांपासून वेगळ्या केल्या जातात, ते कार्यान्वित केल्यावर ते निर्धारवादी आहेत याची खात्री करून आणि साखळी-आधारित डेटा स्रोतांचे दूषित होणार नाही याची खात्री करतात. विशिष्ट असणे: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - Entities created by File Data Sources are immutable, and cannot be updated - File Data Source handlers cannot access entities from other file data sources - Entities associated with File Data Sources cannot be accessed by chain-based handlers -> बहुतेक वापर-प्रकरणांसाठी ही मर्यादा समस्याप्रधान नसावी, परंतु काहींसाठी ते जटिलता आणू शकते. सबग्राफमध्‍ये तुमच्‍या फाईल-आधारित डेटाचे मॉडेल बनवण्‍यात तुम्‍हाला समस्या येत असल्‍यास कृपया डिस्‍कॉर्ड द्वारे संपर्क साधा! 
+> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! याव्यतिरिक्त, फाइल डेटा स्रोतावरून डेटा स्रोत तयार करणे शक्य नाही, मग ते ऑनचेन डेटा स्रोत असो किंवा अन्य फाइल डेटा स्रोत. भविष्यात हे निर्बंध उठवले जाऊ शकतात. @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### How Topic Filters Work -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. @@ -401,7 +401,7 @@ In this example: #### Configuration in Subgraphs -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses @@ -452,17 +452,17 @@ In this configuration: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. 
-- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## Declared eth_call > Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. This feature does the following: -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - Allows faster data fetching, resulting in quicker query responses and a better user experience. - Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. @@ -474,7 +474,7 @@ This feature does the following: #### Scenario without Declarative `eth_calls` -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. Traditionally, these calls might be made sequentially: @@ -498,15 +498,15 @@ Total time taken = max (3, 2, 4) = 4 seconds #### How it Works -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### Example Configuration in Subgraph Manifest Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,7 +524,7 @@ Details for the example above: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. -`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: @@ -535,22 +535,22 @@ calls: > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. 
Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. -बेस डेटा इंडेक्स करण्याऐवजी कॉपीचे ग्राफ्टिंग केल्यामुळे, सुरवातीपासून इंडेक्स करण्यापेक्षा इच्छित ब्लॉकमध्ये सबग्राफ मिळवणे खूप जलद आहे, जरी सुरुवातीच्या डेटा कॉपीला खूप मोठ्या सबग्राफसाठी बरेच तास लागू शकतात. ग्रॅफ्टेड सबग्राफ सुरू होत असताना, ग्राफ नोड आधीपासून कॉपी केलेल्या घटक प्रकारांबद्दल माहिती लॉग करेल. +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -ग्राफ्टेड सबग्राफ GraphQL स्कीमा वापरू शकतो जो बेस सबग्राफपैकी एकाशी एकसारखा नसतो, परंतु त्याच्याशी फक्त सुसंगत असतो. 
ती स्वतःच्या अधिकारात वैध सबग्राफ स्कीमा असणे आवश्यक आहे, परंतु खालील प्रकारे बेस सबग्राफच्या स्कीमापासून विचलित होऊ शकते: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - हे घटक प्रकार जोडते किंवा काढून टाकते - हे घटक प्रकारातील गुणधर्म काढून टाकते @@ -560,4 +560,4 @@ When a subgraph whose manifest contains a `graft` block is deployed, Graph Node - हे इंटरफेस जोडते किंवा काढून टाकते - कोणत्या घटकासाठी इंटरफेस लागू केला जातो ते बदलते -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. From 332e430b80b3baebadc62d03925ab90b46859899 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:30 -0500 Subject: [PATCH 0138/1789] New translations advanced.mdx (Hindi) --- .../developing/creating/advanced.mdx | 79 ++++++++++--------- 1 file changed, 40 insertions(+), 39 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/creating/advanced.mdx b/website/src/pages/hi/subgraphs/developing/creating/advanced.mdx index ac869ec36e5b..572b6deb5be3 100644 --- a/website/src/pages/hi/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/hi/subgraphs/developing/creating/advanced.mdx @@ -4,9 +4,9 @@ title: Advanced Subgraph Features ## अवलोकन -Add and implement advanced subgraph features to enhanced your subgraph's built. +Add and implement advanced Subgraph features to enhanced your Subgraph's built. -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: | Feature | Name | | ---------------------------------------------------- | ---------------- | @@ -14,7 +14,7 @@ Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declar | [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | | [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -25,7 +25,7 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. ## Timeseries और Aggregations @@ -33,9 +33,9 @@ Prerequisites: - Subgraph specVersion must be ≥1.1.0. -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. 
+Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### उदाहरण स्कीमा @@ -97,11 +97,11 @@ Aggregation entities are automatically calculated on the basis of the specified ## गैर-घातक त्रुटियाँ -पहले से सिंक किए गए सबग्राफ पर इंडेक्सिंग त्रुटियां, डिफ़ॉल्ट रूप से, सबग्राफ को विफल कर देंगी और सिंक करना बंद कर देंगी। सबग्राफ को वैकल्पिक रूप से त्रुटियों की उपस्थिति में समन्वयन जारी रखने के लिए कॉन्फ़िगर किया जा सकता है, हैंडलर द्वारा किए गए परिवर्तनों को अनदेखा करके त्रुटि उत्पन्न हुई। यह सबग्राफ लेखकों को अपने सबग्राफ को ठीक करने का समय देता है, जबकि नवीनतम ब्लॉक के विरुद्ध प्रश्नों को जारी रखा जाता है, हालांकि त्रुटि के कारण बग के कारण परिणाम असंगत हो सकते हैं। ध्यान दें कि कुछ त्रुटियाँ अभी भी हमेशा घातक होती हैं। गैर-घातक होने के लिए, त्रुटि नियतात्मक होने के लिए जानी जानी चाहिए। +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **ध्यान दें:** The Graph Network अभी तक गैर-घातक त्रुटियों non-fatal errors का समर्थन नहीं करता है, और डेवलपर्स को Studio के माध्यम से उस कार्यक्षमता का उपयोग करके सबग्राफ को नेटवर्क पर परिनियोजित (deploy) नहीं करना चाहिए। +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. -गैर-घातक त्रुटियों को सक्षम करने के लिए सबग्राफ मेनिफ़ेस्ट पर निम्न फ़ीचर फ़्लैग सेट करने की आवश्यकता होती है: +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: ```yaml specVersion: 0.0.4 @@ -111,7 +111,7 @@ features: ... ``` -Queries को संभावित असंगतियों वाले डेटा को queries करने के लिए `subgraphError` आर्ग्यूमेंट के माध्यम से ऑप्ट-इन करना होगा। यह भी अनुशंसा की जाती है कि `_meta` को queries करें यह जांचने के लिए कि subgraph ने त्रुटियों को स्किप किया है या नहीं, जैसे इस उदाहरण में: +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. 
It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: ```graphql foos(first: 100, subgraphError: allow) { @@ -123,7 +123,7 @@ _meta { } ``` -यदि subgraph में कोई त्रुटि आती है, तो वह queries डेटा और एक graphql त्रुटि के साथ `"indexing_error"` संदेश लौटाएगी, जैसा कि इस उदाहरण उत्तर में दिखाया गया है: +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: ```graphql "data": { @@ -145,7 +145,7 @@ _meta { ## IPFS/Arweave फ़ाइल डेटा स्रोत -फाइल डेटा स्रोत एक नई subgraph कार्यक्षमता है जो indexing के दौरान ऑफ-चेन डेटा तक एक मजबूत, विस्तारित तरीके से पहुँच प्रदान करती है। फाइल डेटा स्रोत IPFS और Arweave से फ़ाइलें फ़ेच करने का समर्थन करते हैं। +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > यह ऑफ-चेन डेटा के नियतात्मक अनुक्रमण के साथ-साथ स्वैच्छिक HTTP-स्रोत डेटा के संभावित परिचय के लिए आधार भी देता है। @@ -290,7 +290,7 @@ For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave b import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -317,23 +317,23 @@ export function handleTransfer(event: TransferEvent): void { This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file बधाई हो, आप फ़ाइल डेटा स्रोतों का उपयोग कर रहे हैं! -#### अपने उप-अनुच्छेदों को तैनात करना +#### Deploying your Subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. #### परिसीमन -फ़ाइल डेटा स्रोत हैंडलर और संस्थाएँ अन्य सबग्राफ संस्थाओं से अलग हैं, यह सुनिश्चित करते हुए कि वे निष्पादित होने पर नियतात्मक हैं, और श्रृंखला-आधारित डेटा स्रोतों का कोई संदूषण सुनिश्चित नहीं करते हैं। विस्तार से: +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - फ़ाइल डेटा स्रोतों द्वारा बनाई गई इकाइयाँ अपरिवर्तनीय हैं, और इन्हें अद्यतन नहीं किया जा सकता है - फ़ाइल डेटा स्रोत हैंडलर अन्य फ़ाइल डेटा स्रोतों से संस्थाओं तक नहीं पहुँच सकते - फ़ाइल डेटा स्रोतों से जुड़ी संस्थाओं को चेन-आधारित हैंडलर द्वारा एक्सेस नहीं किया जा सकता है -> हालांकि यह बाधा अधिकांश उपयोग-मामलों के लिए समस्याग्रस्त नहीं होनी चाहिए, यह कुछ के लिए जटिलता का परिचय दे सकती है। यदि आपको अपने फ़ाइल-आधारित डेटा को सबग्राफ में मॉडलिंग करने में समस्या आ रही है, तो कृपया डिस्कॉर्ड के माध्यम से संपर्क करें! +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. 
Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! इसके अतिरिक्त, फ़ाइल डेटा स्रोत से डेटा स्रोत बनाना संभव नहीं है, चाहे वह ऑनचेन डेटा स्रोत हो या अन्य फ़ाइल डेटा स्रोत। भविष्य में यह प्रतिबंध हटाया जा सकता है। @@ -365,15 +365,15 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra > **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` -विषय फ़िल्टर, जिन्हें इंडेक्स किए गए तर्क फ़िल्टर भी कहा जाता है, एक शक्तिशाली विशेषता है जो उपयोगकर्ताओं को उनके इंडेक्स किए गए तर्कों के मानों के आधार पर ब्लॉकचेन घटनाओं को सटीक रूप से फ़िल्टर करने की अनुमति देती है। +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. -- ये फ़िल्टर ब्लॉकचेन पर घटनाओं की विशाल धारा से रुचि की विशिष्ट घटनाओं को अलग करने में मदद करते हैं, जिससे सबग्राफ़ केवल प्रासंगिक डेटा पर ध्यान केंद्रित करके अधिक कुशलता से कार्य कर सके। +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. -- यह व्यक्तिगत subgraphs बनाने के लिए उपयोगी है जो विशेष पते और विभिन्न स्मार्ट कॉन्ट्रैक्ट्स के साथ उनके इंटरैक्शन को ट्रैक करते हैं ब्लॉकचेन पर। +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. ### शीर्षक फ़िल्टर कैसे काम करते हैं -जब एक स्मार्ट कॉन्ट्रैक्ट एक इवेंट को उत्पन्न करता है, तो कोई भी तर्क जो 'indexed' के रूप में चिह्नित किया गया है, एक 'subgraph' की मैनिफेस्ट में फ़िल्टर के रूप में उपयोग किया जा सकता है। यह 'subgraph' को इन 'indexed' तर्कों से मेल खाने वाले इवेंट्स के लिए चयनात्मक रूप से सुनने की अनुमति देता है। +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. - The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. @@ -401,7 +401,7 @@ contract Token { #### सबस्पष्ट में कॉन्फ़िगरेशन -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: ```yaml eventHandlers: @@ -436,7 +436,7 @@ eventHandlers: - `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. #### उदाहरण 2: दो या अधिक 'पते' के बीच किसी भी दिशा में लेन-देन को ट्रैक करना @@ -452,17 +452,17 @@ eventHandlers: - `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. - `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. 
-- Subgraph उन कई पतों के बीच होने वाले लेनदेन को दोनों दिशाओं में सूचीबद्ध करेगा, जिससे सभी पतों के बीच इंटरैक्शन की व्यापक निगरानी संभव हो सकेगी। +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. ## घोषित eth_call > नोट: यह एक प्रयोगात्मक फीचर है जो अभी तक स्थिर Graph Node रिलीज़ में उपलब्ध नहीं है। आप इसे केवल Subgraph Studio या अपने स्वयं-होस्टेड नोड में ही उपयोग कर सकते हैं। -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. यह फ़ीचर निम्नलिखित कार्य करता है: -- इथेरियम ब्लॉकचेन से डेटा प्राप्त करने के प्रदर्शन में महत्वपूर्ण सुधार करता है, जिससे कई कॉल के लिए कुल समय कम होता है और सबग्राफ की समग्र दक्षता का अनुकूलन होता है। +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. - यह तेजी से डेटा फ़ेचिंग की अनुमति देता है, जिससे तेजी से क्वेरी प्रतिक्रियाएँ और बेहतर उपयोगकर्ता अनुभव मिलता है। - कई Ethereum कॉल्स से डेटा को एकत्रित करने की आवश्यकता वाली अनुप्रयोगों के लिए प्रतीक्षा समय को कम करता है, जिससे डेटा पुनर्प्राप्ति प्रक्रिया अधिक प्रभावी हो जाती है। @@ -474,7 +474,7 @@ Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` #### Scenario without Declarative `eth_calls` -आपके पास एक subgraph है जिसे एक उपयोगकर्ता के लेनदेन, बैलेंस और टोकन होल्डिंग्स के बारे में डेटा प्राप्त करने के लिए तीन Ethereum कॉल करने की आवश्यकता है। +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. परंपरागत रूप से, ये कॉल क्रमिक रूप से की जा सकती हैं: @@ -498,15 +498,15 @@ Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` #### कैसे कार्य करता है -1. In the subgraph manifest, आप Ethereum कॉल्स को इस तरह घोषित करते हैं कि ये समानांतर में निष्पादित किए जा सकें। +1. Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. 2. पैरलेल निष्पादन इंजन: The Graph Node का निष्पादन इंजन इन घोषणाओं को पहचानता है और कॉल को समानांतर में चलाता है। -3. परिणाम संग्रहण: जब सभी कॉल समाप्त हो जाते हैं, तो परिणामों को एकत्रित किया जाता है और आगे की प्रक्रिया के लिए उपयोग किया जाता है। +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. #### उदाहरण कॉन्फ़िगरेशन Subgraph मैनिफेस्ट में Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. -`Subgraph.yaml` using `event.address`: +`subgraph.yaml` using `event.address`: ```yaml eventHandlers: @@ -524,33 +524,34 @@ calls: - The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` - The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. 
-`Subgraph.yaml` using `event.params` +`subgraph.yaml` using `event.params` ```yaml calls: - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() + ``` ### मौजूदा सबग्राफ पर ग्राफ्टिंग > **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... graft: - base: Qm... # Subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. -क्योंकि आधार डेटा को अनुक्रमित करने के बजाय प्रतियों को ग्राफ्ट करना, स्क्रैच से अनुक्रमणित करने की तुलना में सबग्राफ को वांछित ब्लॉक में प्राप्त करना बहुत तेज है, हालांकि बहुत बड़े सबग्राफ के लिए प्रारंभिक डेटा कॉपी में अभी भी कई घंटे लग सकते हैं। जबकि ग्राफ्टेड सबग्राफ को इनिशियलाइज़ किया जा रहा है, ग्राफ़ नोड उन एंटिटी प्रकारों के बारे में जानकारी लॉग करेगा जो पहले ही कॉपी किए जा चुके हैं। +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. 
While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -ग्राफ्टेड सबग्राफ एक ग्राफक्यूएल स्कीमा का उपयोग कर सकता है जो बेस सबग्राफ के समान नहीं है, लेकिन इसके अनुकूल हो। यह अपने आप में एक मान्य सबग्राफ स्कीमा होना चाहिए, लेकिन निम्नलिखित तरीकों से बेस सबग्राफ के स्कीमा से विचलित हो सकता है: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - यह इकाई के प्रकारों को जोड़ या हटा सकता है| - यह इकाई प्रकारों में से गुणों को हटाता है| @@ -560,4 +561,4 @@ When a subgraph whose manifest contains a `graft` block is deployed, Graph Node - यह इंटरफेस जोड़ता या हटाता है| - यह कि, किन इकाई प्रकारों के लिए इंटरफ़ेस लागू होगा, इसे बदल देता है| -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. From 68df1e69b3edd8883410997ec254f31e7514f519 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:31 -0500 Subject: [PATCH 0139/1789] New translations advanced.mdx (Swahili) --- .../developing/creating/advanced.mdx | 563 ++++++++++++++++++ 1 file changed, 563 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/developing/creating/advanced.mdx diff --git a/website/src/pages/sw/subgraphs/developing/creating/advanced.mdx b/website/src/pages/sw/subgraphs/developing/creating/advanced.mdx new file mode 100644 index 000000000000..d19755ac0876 --- /dev/null +++ b/website/src/pages/sw/subgraphs/developing/creating/advanced.mdx @@ -0,0 +1,563 @@ +--- +title: Advanced Subgraph Features +--- + +## Overview + +Add and implement advanced Subgraph features to enhanced your Subgraph's built. + +Starting from `specVersion` `0.0.4`, Subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: + +| Feature | Name | +| ---------------------------------------------------- | ---------------- | +| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | +| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | +| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | + +For instance, if a Subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: + +```yaml +specVersion: 0.0.4 +description: Gravatar for Ethereum +features: + - fullTextSearch + - nonFatalErrors +dataSources: ... +``` + +> Note that using a feature without declaring it will incur a **validation error** during Subgraph deployment, but no errors will occur if a feature is declared but not used. + +## Timeseries and Aggregations + +Prerequisites: + +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your Subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of Subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. 
+ +### Example Schema + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} + +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. + +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. + +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. + +#### Available Aggregation Intervals + +- `hour`: sets the timeseries period every hour, on the hour. +- `day`: sets the timeseries period every day, starting and ending at 00:00. + +#### Available Aggregation Functions + +- `sum`: Total of all values. +- `count`: Number of values. +- `min`: Minimum value. +- `max`: Maximum value. +- `first`: First value in the period. +- `last`: Last value in the period. + +#### Example Aggregations Query + +```graphql +{ + stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { + id + timestamp + sum + } +} +``` + +[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. + +## Non-fatal errors + +Indexing errors on already synced Subgraphs will, by default, cause the Subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives Subgraph authors time to correct their Subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. + +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy Subgraphs using that functionality to the network via the Studio. + +Enabling non-fatal errors requires setting the following feature flag on the Subgraph manifest: + +```yaml +specVersion: 0.0.4 +description: Gravatar for Ethereum +features: + - nonFatalErrors + ... +``` + +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. 
It is also recommended to query `_meta` to check if the Subgraph has skipped over errors, as in the example: + +```graphql +foos(first: 100, subgraphError: allow) { + id +} + +_meta { + hasIndexingErrors +} +``` + +If the Subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: + +```graphql +"data": { + "foos": [ + { + "id": "0xdead" + } + ], + "_meta": { + "hasIndexingErrors": true + } +}, +"errors": [ + { + "message": "indexing_error" + } +] +``` + +## IPFS/Arweave File Data Sources + +File data sources are a new Subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. + +> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. + +### Overview + +Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. + +This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. + +> This replaces the existing `ipfs.cat` API + +### Upgrade guide + +#### Update `graph-ts` and `graph-cli` + +File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 + +#### Add a new entity type which will be updated when files are found + +File data sources cannot access or update chain-based entities, but must update file specific entities. + +This may mean splitting out fields from existing entities into separate entities, linked together. + +Original combined entity: + +```graphql +type Token @entity { + id: ID! + tokenID: BigInt! + tokenURI: String! + externalURL: String! + ipfsURI: String! + image: String! + name: String! + description: String! + type: String! + updatedAtTimestamp: BigInt + owner: User! +} +``` + +New, split entity: + +```graphql +type Token @entity { + id: ID! + tokenID: BigInt! + tokenURI: String! + ipfsURI: TokenMetadata + updatedAtTimestamp: BigInt + owner: String! +} + +type TokenMetadata @entity { + id: ID! + image: String! + externalURL: String! + name: String! + description: String! +} +``` + +If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! + +> You can use [nested filters](/subgraphs/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. + +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` + +This is the data source which will be spawned when a file of interest is identified. 
+ +```yaml +templates: + - name: TokenMetadata + kind: file/ipfs + mapping: + apiVersion: 0.0.7 + language: wasm/assemblyscript + file: ./src/mapping.ts + handler: handleMetadata + entities: + - TokenMetadata + abis: + - name: Token + file: ./abis/Token.json +``` + +> Currently `abis` are required, though it is not possible to call contracts from within file data sources + +The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. + +#### Create a new handler to process files + +This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/subgraphs/developing/creating/graph-ts/api/#json-api)). + +The CID of the file as a readable string can be accessed via the `dataSource` as follows: + +```typescript +const cid = dataSource.stringParam() +``` + +Example handler: + +```typescript +import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' +import { TokenMetadata } from '../generated/schema' + +export function handleMetadata(content: Bytes): void { + let tokenMetadata = new TokenMetadata(dataSource.stringParam()) + const value = json.fromBytes(content).toObject() + if (value) { + const image = value.get('image') + const name = value.get('name') + const description = value.get('description') + const externalURL = value.get('external_url') + + if (name && image && description && externalURL) { + tokenMetadata.name = name.toString() + tokenMetadata.image = image.toString() + tokenMetadata.externalURL = externalURL.toString() + tokenMetadata.description = description.toString() + } + + tokenMetadata.save() + } +} +``` + +#### Spawn file data sources when required + +You can now create file data sources during execution of chain-based handlers: + +- Import the template from the auto-generated `templates` +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). + +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). + +Example: + +```typescript +import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' + +const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' +//This example code is for a Crypto coven Subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. 
+ +export function handleTransfer(event: TransferEvent): void { + let token = Token.load(event.params.tokenId.toString()) + if (!token) { + token = new Token(event.params.tokenId.toString()) + token.tokenID = event.params.tokenId + + token.tokenURI = '/' + event.params.tokenId.toString() + '.json' + const tokenIpfsHash = ipfshash + token.tokenURI + //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" + + token.ipfsURI = tokenIpfsHash + + TokenMetadataTemplate.create(tokenIpfsHash) + } + + token.updatedAtTimestamp = event.block.timestamp + token.owner = event.params.to.toHexString() + token.save() +} +``` + +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. + +This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. + +> Previously, this is the point at which a Subgraph developer would have called `ipfs.cat(CID)` to fetch the file + +Congratulations, you are using file data sources! + +#### Deploying your Subgraphs + +You can now `build` and `deploy` your Subgraph to any Graph Node >=v0.30.0-rc.0. + +#### Limitations + +File data source handlers and entities are isolated from other Subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: + +- Entities created by File Data Sources are immutable, and cannot be updated +- File Data Source handlers cannot access entities from other file data sources +- Entities associated with File Data Sources cannot be accessed by chain-based handlers + +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a Subgraph! + +Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. + +#### Best practices + +If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. + +You can use [DataSource context](/subgraphs/developing/creating/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. + +If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. + +> We are working to improve the above recommendation, so queries only return the "most recent" version + +#### Known issues + +File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. + +Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. 
+ +#### Examples + +[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) + +#### References + +[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) + +## Indexed Argument Filters / Topic Filters + +> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` + +Topic filters, also known as indexed argument filters, are a powerful feature in Subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. + +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing Subgraphs to operate more efficiently by focusing only on relevant data. + +- This is useful for creating personal Subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. + +### How Topic Filters Work + +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a Subgraph's manifest. This allows the Subgraph to listen selectively for events that match these indexed arguments. + +- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. + +```solidity +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +contract Token { + // Event declaration with indexed parameters for addresses + event Transfer(address indexed from, address indexed to, uint256 value); + + // Function to simulate transferring tokens + function transfer(address to, uint256 value) public { + // Emitting the Transfer event with from, to, and value + emit Transfer(msg.sender, to, value); + } +} +``` + +In this example: + +- The `Transfer` event is used to log transactions of tokens between addresses. +- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. +- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. + +#### Configuration in Subgraphs + +Topic filters are defined directly within the event handler configuration in the Subgraph manifest. Here is how they are configured: + +```yaml +eventHandlers: + - event: SomeEvent(indexed uint256, indexed address, indexed uint256) + handler: handleSomeEvent + topic1: ['0xValue1', '0xValue2'] + topic2: ['0xAddress1', '0xAddress2'] + topic3: ['0xValue3'] +``` + +In this setup: + +- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. +- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. + +#### Filter Logic + +- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. +- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. 
+ +#### Example 1: Tracking Direct Transfers from Address A to Address B + +```yaml +eventHandlers: + - event: Transfer(indexed address,indexed address,uint256) + handler: handleDirectedTransfer + topic1: ['0xAddressA'] # Sender Address + topic2: ['0xAddressB'] # Receiver Address +``` + +In this configuration: + +- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. +- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. +- The Subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. + +#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses + +```yaml +eventHandlers: + - event: Transfer(indexed address,indexed address,uint256) + handler: handleTransferToOrFrom + topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address + topic2: ['0xAddressB', '0xAddressC'] # Receiver Address +``` + +In this configuration: + +- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. +- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. +- The Subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. + +## Declared eth_call + +> Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. + +Declarative `eth_calls` are a valuable Subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. + +This feature does the following: + +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the Subgraph's overall efficiency. +- Allows faster data fetching, resulting in quicker query responses and a better user experience. +- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. + +### Key Concepts + +- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. +- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. +- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). + +#### Scenario without Declarative `eth_calls` + +Imagine you have a Subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. + +Traditionally, these calls might be made sequentially: + +1. Call 1 (Transactions): Takes 3 seconds +2. Call 2 (Balance): Takes 2 seconds +3. Call 3 (Token Holdings): Takes 4 seconds + +Total time taken = 3 + 2 + 4 = 9 seconds + +#### Scenario with Declarative `eth_calls` + +With this feature, you can declare these calls to be executed in parallel: + +1. Call 1 (Transactions): Takes 3 seconds +2. Call 2 (Balance): Takes 2 seconds +3. Call 3 (Token Holdings): Takes 4 seconds + +Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. + +Total time taken = max (3, 2, 4) = 4 seconds + +#### How it Works + +1. 
Declarative Definition: In the Subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the Subgraph for further processing. + +#### Example Configuration in Subgraph Manifest + +Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. + +`subgraph.yaml` using `event.address`: + +```yaml +eventHandlers: +event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) +handler: handleSwap +calls: + global0X128: Pool[event.address].feeGrowthGlobal0X128() + global1X128: Pool[event.address].feeGrowthGlobal1X128() +``` + +Details for the example above: + +- `global0X128` is the declared `eth_call`. +- The text (`global0X128`) is the label for this `eth_call` which is used when logging errors. +- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` +- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. + +`subgraph.yaml` using `event.params` + +```yaml +calls: + - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() +``` + +### Grafting onto Existing Subgraphs + +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + +When a Subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing Subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. + +A Subgraph is grafted onto a base Subgraph when the Subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: + +```yaml +description: ... +graft: + base: Qm... # Subgraph ID of base Subgraph + block: 7345624 # Block number +``` + +When a Subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` Subgraph up to and including the given `block` and then continue indexing the new Subgraph from that block on. The base Subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted Subgraph. + +Because grafting copies rather than indexes base data, it is much quicker to get the Subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large Subgraphs. While the grafted Subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. + +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. 
It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: + +- It adds or removes entity types +- It removes attributes from entity types +- It adds nullable attributes to entity types +- It turns non-nullable attributes into nullable attributes +- It adds values to enums +- It adds or removes interfaces +- It changes for which entity types an interface is implemented + +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the Subgraph manifest. From a85f62fbeb673a19e072331224dcd58cce757fbe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:32 -0500 Subject: [PATCH 0140/1789] New translations api.mdx (Romanian) --- .../developing/creating/graph-ts/api.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/ro/subgraphs/developing/creating/graph-ts/api.mdx index 35bb04826c98..2e256ae18190 100644 --- a/website/src/pages/ro/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/ro/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: AssemblyScript API --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` +- Code generated from Subgraph files by `graph codegen` You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). @@ -27,18 +27,18 @@ The `@graphprotocol/graph-ts` library provides the following APIs: ### Versions -The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. +The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| Version | Release notes | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| Version | Release notes | +| :-----: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Built-in Types @@ -223,7 +223,7 @@ import { store } from '@graphprotocol/graph-ts' The `store` API allows to load, save and remove entities from and to the Graph Node store. -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### Creating entities @@ -282,8 +282,8 @@ As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotoco The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // or however the ID is constructed @@ -380,11 +380,11 @@ The Ethereum API provides access to smart contracts, public state variables, con #### Support for Ethereum Types -As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. 
-The following example illustrates this. Given a subgraph schema like +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +483,7 @@ class Log { #### Access to Smart Contract State -The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. A common pattern is to access the contract from which an event originates. This is achieved with the following code: @@ -506,7 +506,7 @@ export function handleTransfer(event: TransferEvent) { As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. -Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### Handling Reverted Calls @@ -582,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // returns false import { log } from '@graphprotocol/graph-ts' ``` -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. The `log` API includes the following functions: @@ -590,7 +590,7 @@ The `log` API includes the following functions: - `log.info(fmt: string, args: Array): void` - logs an informational message. - `log.warning(fmt: string, args: Array): void` - logs a warning. - `log.error(fmt: string, args: Array): void` - logs an error message. -- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. @@ -721,7 +721,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. -On success, `ipfs.map` returns `void`. 
If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### Crypto API @@ -836,7 +836,7 @@ The base `Entity` class and the child `DataSourceContext` class have helpers to ### DataSourceContext in Manifest -The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Here is a YAML example illustrating the usage of various types in the `context` section: @@ -887,4 +887,4 @@ dataSources: - `List`: Specifies a list of items. Each item needs to specify its type and data. - `BigInt`: Specifies a large integer value. Must be quoted due to its large size. -This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 2798d7e357067a63ed8c8ec3205ce35afe9a55d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:34 -0500 Subject: [PATCH 0141/1789] New translations api.mdx (French) --- .../developing/creating/graph-ts/api.mdx | 130 +++++++++--------- 1 file changed, 65 insertions(+), 65 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/fr/subgraphs/developing/creating/graph-ts/api.mdx index a74814844016..688a83a68b6f 100644 --- a/website/src/pages/fr/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/fr/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: API AssemblyScript --- -> Note : Si vous avez créé un subgraph avant la version `graph-cli`/`graph-ts` `0.22.0`, alors vous utilisez une ancienne version d'AssemblyScript. Il est recommandé de consulter le [`Guide de Migration`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Découvrez quelles APIs intégrées peuvent être utilisées lors de l'écriture des mappages de subgraph. Il existe deux types d'APIs disponibles par défaut : +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - La [Bibliothèque TypeScript de The Graph](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code généré à partir des fichiers du subgraph par `graph codegen` +- Code generated from Subgraph files by `graph codegen` Vous pouvez également ajouter d'autres bibliothèques comme dépendances, à condition qu'elles soient compatibles avec [AssemblyScript] (https://github.com/AssemblyScript/assemblyscript). 
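To make the two API sources concrete, the sketch below combines them in a single handler: built-in helpers are imported from `@graphprotocol/graph-ts`, while the event and entity classes are produced by `graph codegen`. The `Token` contract, `Transfer` event, `Account` entity and `balance` field are illustrative assumptions, not names taken from any file in this patch.

```typescript
// Built-in APIs ship with the graph-ts library
import { BigInt, log } from '@graphprotocol/graph-ts'
// Generated classes come from the Subgraph's own ABI and GraphQL schema
// (the paths and names below are assumptions for this sketch)
import { Transfer as TransferEvent } from '../generated/Token/Token'
import { Account } from '../generated/schema'

export function handleTransfer(event: TransferEvent): void {
  let id = event.params.to.toHexString()
  // Load-or-create pattern: reuse the entity if it already exists
  let account = Account.load(id)
  if (account == null) {
    account = new Account(id)
    account.balance = BigInt.fromI32(0)
  }
  account.balance = account.balance.plus(event.params.value)
  account.save()
  log.info('Handled transfer of {} to {}', [event.params.value.toString(), id])
}
```

The null check before `new Account(id)` is the usual load-or-create pattern for entities written to the store.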
@@ -27,18 +27,18 @@ La bibliothèque `@graphprotocol/graph-ts` fournit les API suivantes : ### Versions -La `apiVersion` dans le manifeste du subgraph spécifie la version de l'API de mappage exécutée par Graph Node pour un subgraph donné. +The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| Version | Notes de version | -| :-: | --- | -| 0.0.9 | Ajout de nouvelles fonctions hôtes [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Ajout de la validation pour l'existence des champs dans le schéma lors de l'enregistrement d'une entité. | -| 0.0.7 | Ajout des classes `TransactionReceipt` et `Log`aux types Ethereum
Ajout du champ `receipt` à l'objet Ethereum Event | -| 0.0.6 | Ajout du champ `nonce` à l'objet Ethereum Transaction
Ajout de `baseFeePerGas` à l'objet Ethereum Block | -| 0.0.5 | AssemblyScript a été mis à niveau à niveau vers la version 0.19.10 (cela inclut des changements brusques, veuillez consulter le [`Guide de migration`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renommé en `ethereum.transaction.gasLimit` | -| 0.0.4 | Ajout du champ `functionSignature` à l'objet Ethereum SmartContractCall | -| 0.0.3 | Ajout du champ `from` à l'objet Ethereum Call
`ethereum.call.address` renommé en `ethereum.call.to` | -| 0.0.2 | Ajout du champ `input` à l'objet Ethereum Transaction | +| Version | Notes de version | +| :-----: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Ajout de nouvelles fonctions hôtes [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Ajout de la validation pour l'existence des champs dans le schéma lors de l'enregistrement d'une entité. | +| 0.0.7 | Ajout des classes `TransactionReceipt` et `Log`aux types Ethereum
Ajout du champ `receipt` à l'objet Ethereum Event | +| 0.0.6 | Ajout du champ `nonce` à l'objet Ethereum Transaction
Ajout de `baseFeePerGas` à l'objet Ethereum Block | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Ajout du champ `functionSignature` à l'objet Ethereum SmartContractCall | +| 0.0.3 | Ajout du champ `from` à l'objet Ethereum Call
`ethereum.call.address` renommé en `ethereum.call.to` | +| 0.0.2 | Ajout du champ `input` à l'objet Ethereum Transaction | ### Types intégrés @@ -223,7 +223,7 @@ import { store } from '@graphprotocol/graph-ts' L'API `store` permet de charger, sauvegarder et supprimer des entités dans et depuis le magasin Graph Node. -Les entités écrites dans le magasin correspondent directement aux types `@entity` définis dans le schéma GraphQL du subgraph. Pour faciliter le travail avec ces entités, la commande `graph codegen` fournie par [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) génère des classes d'entités, qui sont des sous-classes du type `Entity` intégré, avec des accesseurs et des mutateurs pour les champs du schéma ainsi que des méthodes pour charger et sauvegarder ces entités. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### Création d'entités @@ -282,8 +282,8 @@ Depuis `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 et `@graphprotoco L'API de store facilite la récupération des entités créées ou mises à jour dans le bloc actuel. Une situation typique pour cela est qu'un gestionnaire crée une transaction à partir d'un événement onchain et qu'un gestionnaire ultérieur souhaite accéder à cette transaction si elle existe. -- Dans le cas où la transaction n'existe pas, le subgraph devra interroger la base de données pour découvrir que l'entité n'existe pas. Si l'auteur du subgraph sait déjà que l'entité doit avoir été créée dans le même bloc, utiliser `loadInBlock` évite ce détour par la base de données. -- Pour certains subgraphs, ces recherches infructueuses peuvent contribuer de manière significative au temps d'indexation. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // ou de toute autre manière dont l'ID est construit @@ -380,11 +380,11 @@ L'API Ethereum donne accès aux contrats intelligents, aux variables d'état pub #### Prise en charge des types Ethereum -Comme pour les entités, `graph codegen` génère des classes pour tous les contrats intelligents et événements utilisés dans un subgraph. Pour cela, les ABIs des contrats doivent faire partie de la source de données dans le manifeste du subgraph. En général, les fichiers ABI sont stockés dans un dossier `abis/` . +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -Avec les classes générées, les conversions entre les types Ethereum et [les types intégrés](#built-in-types) se font en arrière-plan afin que les auteurs de subgraph n'aient pas à s'en soucier. 
+With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. -L’exemple suivant illustre cela. Étant donné un schéma de subgraph comme +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +483,7 @@ class Log { #### Accès à l'état du contrat intelligent -Le code généré par `graph codegen` inclut également des classes pour les contrats intelligents utilisés dans le subgraph. Celles-ci peuvent être utilisées pour accéder aux variables d'état publiques et appeler des fonctions du contrat au bloc actuel. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. Un modèle courant consiste à accéder au contrat dont provient un événement. Ceci est réalisé avec le code suivant : @@ -506,7 +506,7 @@ export function handleTransfer(event: TransferEvent) { Tant que le `ERC20Contract` sur Ethereum a une fonction publique en lecture seule appelée `symbol`, elle peut être appelée avec `.symbol()`. Pour les variables d'état publiques, une méthode du même nom est créée automatiquement. -Tout autre contrat faisant partie du subgraph peut être importé à partir du code généré et peut être lié à une adresse valide. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### Gestion des appels retournés @@ -582,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // renvoie false import { log } from '@graphprotocol/graph-ts' ``` -L'API `log` permet aux subgraphs d'enregistrer des informations sur la sortie standard de Graph Node ainsi que sur Graph Explorer. Les messages peuvent être enregistrés en utilisant différents niveaux de journalisation. Une syntaxe de chaîne de caractère de format de base est fournie pour composer des messages de journal à partir de l'argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. L'API `log` inclut les fonctions suivantes : @@ -590,7 +590,7 @@ L'API `log` inclut les fonctions suivantes : - `log.info(fmt: string, args: Array): void` - enregistre un message d'information. - `log.warning(fmt: string, args: Array): void` - enregistre un avertissement. - `log.error(fmt: string, args: Array): void` - enregistre un message d'erreur. -- `log.critical(fmt: string, args: Array): void` – enregistre un message critique _et_ met fin au subgraph. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. L'API `log` prend une chaîne de caractères de format et un tableau de valeurs de chaîne de caractères. Elle remplace ensuite les espaces réservés par les valeurs de chaîne de caractères du tableau. Le premier espace réservé `{}` est remplacé par la première valeur du tableau, le second `{}` est remplacé par la deuxième valeur, et ainsi de suite. @@ -721,7 +721,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) Le seul indicateur actuellement pris en charge est `json`, qui doit être passé à `ipfs.map`. Avec l'indicateur `json` , le fichier IPFS doit consister en une série de valeurs JSON, une valeur par ligne. 
L'appel à `ipfs.map` lira chaque ligne du fichier, la désérialisera en un `JSONValue` et appellera le callback pour chacune d'entre elles. Le callback peut alors utiliser des opérations des entités pour stocker des données à partir du `JSONValue`. Les modifications d'entité ne sont enregistrées que lorsque le gestionnaire qui a appelé `ipfs.map` se termine avec succès ; en attendant, elles sont conservées en mémoire, et la taille du fichier que `ipfs.map` peut traiter est donc limitée. -En cas de succès, `ipfs.map` renvoie `void`. Si une invocation du callback provoque une erreur, le gestionnaire qui a invoqué `ipfs.map` est interrompu et le subgraph marqué comme échoué. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### Crypto API @@ -770,44 +770,44 @@ Lorsque le type d'une valeur est certain, il peut être converti en un [type int ### Référence des conversions de types -| Source(s) | Destination | Fonctions de conversion | -| -------------------- | -------------------- | ---------------------------- | -| Address | Bytes | aucune | -| Address | String | s.toHexString() | -| BigDecimal | String | s.toString() | -| BigInt | BigDecimal | s.toBigDecimal() | -| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | -| BigInt | String (unicode) | s.toString() | -| BigInt | i32 | s.toI32() | -| Boolean | Boolean | aucune | -| Bytes (signé) | BigInt | BigInt.fromSignedBytes(s) | -| Bytes (non signé) | BigInt | BigInt.fromUnsignedBytes(s) | -| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | -| Bytes | String (unicode) | s.toString() | -| Bytes | String (base58) | s.toBase58() | -| Bytes | i32 | s.toI32() | -| Bytes | u32 | s.toU32() | -| Bytes | JSON | json.fromBytes(s) | -| int8 | i32 | aucune | -| int32 | i32 | aucune | -| int32 | BigInt | BigInt.fromI32(s) | -| uint24 | i32 | aucune | -| int64 - int256 | BigInt | aucune | -| uint32 - uint256 | BigInt | aucune | -| JSON | boolean | s.toBool() | -| JSON | i64 | s.toI64() | -| JSON | u64 | s.toU64() | -| JSON | f64 | s.toF64() | -| JSON | BigInt | s.toBigInt() | -| JSON | string | s.toString() | -| JSON | Array | s.toArray() | -| JSON | Object | s.toObject() | -| String | Address | Address.fromString(s) | -| Bytes | Address | Address.fromBytes(s) | -| String | BigInt | BigInt.fromString(s) | -| String | BigDecimal | BigDecimal.fromString(s) | -| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | -| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | +| Source(s) | Destination | Fonctions de conversion | +| --------------------- | -------------------- | -------------------------------- | +| Address | Bytes | aucune | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | aucune | +| Bytes (signé) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (non signé) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | aucune | +| int32 | i32 | aucune | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | aucune | +| int64 
- int256 | BigInt | aucune | +| uint32 - uint256 | BigInt | aucune | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | ### Métadonnées de la source de données @@ -836,7 +836,7 @@ La classe de base `Entity` et la classe enfant `DataSourceContext` disposent d'a ### DataSourceContext in Manifest -La section `context` de `dataSources` vous permet de définir des paires clé-valeur qui sont accessibles dans vos mappages de subgraphs. Les types disponibles sont `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, et `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Voici un exemple YAML illustrant l'utilisation de différents types dans la section `context` : @@ -887,4 +887,4 @@ dataSources: - `List` : Spécifie une liste d'éléments. Chaque élément doit spécifier son type et ses données. - `BigInt` : Spécifie une grande valeur entière. Elle doit être mise entre guillemets en raison de sa grande taille. -Ce contexte est ensuite accessible dans vos fichiers de mappage de subgraphs, permettant des subgraphs plus dynamiques et configurables. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 4661091ebfab2265d3db21385552528cdd5e808b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:35 -0500 Subject: [PATCH 0142/1789] New translations api.mdx (Spanish) --- .../developing/creating/graph-ts/api.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/es/subgraphs/developing/creating/graph-ts/api.mdx index 67ec89027c6b..4479673b2af3 100644 --- a/website/src/pages/es/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/es/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: AssemblyScript API --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: +Learn what built-in APIs can be used when writing Subgraph mappings. 
There are two kinds of APIs available out of the box: - The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` +- Code generated from Subgraph files by `graph codegen` You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). @@ -27,18 +27,18 @@ The `@graphprotocol/graph-ts` library provides the following APIs: ### Versiones -The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. +The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| Version | Notas del lanzamiento | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| Version | Notas del lanzamiento | +| :-----: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Tipos Incorporados @@ -223,7 +223,7 @@ import { store } from '@graphprotocol/graph-ts' The `store` API allows to load, save and remove entities from and to the Graph Node store. -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### Creacion de entidades @@ -282,8 +282,8 @@ As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotoco The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // or however the ID is constructed @@ -380,11 +380,11 @@ La API de Ethereum proporciona acceso a los contratos inteligentes, a las variab #### Compatibilidad con los tipos de Ethereum -As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. 
+With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. -El siguiente ejemplo lo ilustra. Dado un esquema de subgrafos como +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +483,7 @@ class Log { #### Acceso al Estado del Contrato Inteligente -The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. Un patrón común es acceder al contrato desde el que se origina un evento. Esto se consigue con el siguiente código: @@ -506,7 +506,7 @@ export function handleTransfer(event: TransferEvent) { As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. -Cualquier otro contrato que forme parte del subgrafo puede ser importado desde el código generado y puede ser vinculado a una dirección válida. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### Tratamiento de las Llamadas Revertidas @@ -582,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // returns false import { log } from '@graphprotocol/graph-ts' ``` -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. The `log` API includes the following functions: @@ -590,7 +590,7 @@ The `log` API includes the following functions: - `log.info(fmt: string, args: Array): void` - logs an informational message. - `log.warning(fmt: string, args: Array): void` - logs a warning. - `log.error(fmt: string, args: Array): void` - logs an error message. -- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. @@ -721,7 +721,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. 
Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. -On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### API Cripto @@ -836,7 +836,7 @@ The base `Entity` class and the child `DataSourceContext` class have helpers to ### DataSourceContext in Manifest -The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Here is a YAML example illustrating the usage of various types in the `context` section: @@ -887,4 +887,4 @@ dataSources: - `List`: Specifies a list of items. Each item needs to specify its type and data. - `BigInt`: Specifies a large integer value. Must be quoted due to its large size. -This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 4a19ab7943f2c2596bfdfe65af44c6ed814270e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:36 -0500 Subject: [PATCH 0143/1789] New translations api.mdx (Arabic) --- .../developing/creating/graph-ts/api.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/ar/subgraphs/developing/creating/graph-ts/api.mdx index 8245a637cc8a..ef43760cfdbf 100644 --- a/website/src/pages/ar/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/ar/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: AssemblyScript API --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` +- Code generated from Subgraph files by `graph codegen` You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). 
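On the mapping side, values declared under `context` in the manifest are read back with `dataSource.context()`. A minimal sketch, assuming a `String` entry keyed `tradingPair` was defined in the manifest (the key name is an assumption, not one of the example keys above):

```typescript
import { dataSource, ethereum, log } from '@graphprotocol/graph-ts'

export function handleSomeEvent(event: ethereum.Event): void {
  // Reads the key-value pairs declared under `context` for this data source
  let context = dataSource.context()
  let tradingPair = context.getString('tradingPair') // assumed String entry
  log.info('Data source configured for trading pair {}', [tradingPair])
}
```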
@@ -27,18 +27,18 @@ The `@graphprotocol/graph-ts` library provides the following APIs: ### إصدارات -The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. +The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| الاصدار | ملاحظات الإصدار | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| الاصدار | ملاحظات الإصدار | +| :-----: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### الأنواع المضمنة (Built-in) @@ -223,7 +223,7 @@ It adds the following method on top of the `Bytes` API: The `store` API allows to load, save and remove entities from and to the Graph Node store. -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### إنشاء الكيانات @@ -282,8 +282,8 @@ As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotoco The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // or however the ID is constructed @@ -380,11 +380,11 @@ The Ethereum API provides access to smart contracts, public state variables, con #### دعم أنواع الإيثيريوم -As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. 
+With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. -The following example illustrates this. Given a subgraph schema like +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +483,7 @@ class Log { #### الوصول إلى حالة العقد الذكي Smart Contract -The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. A common pattern is to access the contract from which an event originates. This is achieved with the following code: @@ -506,7 +506,7 @@ export function handleTransfer(event: TransferEvent) { As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. -Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### معالجة الاستدعاءات المعادة @@ -582,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // returns false import { log } from '@graphprotocol/graph-ts ``` -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. The `log` API includes the following functions: @@ -590,7 +590,7 @@ The `log` API includes the following functions: - `log.info(fmt: string, args: Array): void` - logs an informational message. - `log.warning(fmt: string, args: Array): void` - logs a warning. - `log.error(fmt: string, args: Array): void` - logs an error message. -- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. @@ -721,7 +721,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. 
Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. -On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### Crypto API @@ -836,7 +836,7 @@ The base `Entity` class and the child `DataSourceContext` class have helpers to ### DataSourceContext in Manifest -The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Here is a YAML example illustrating the usage of various types in the `context` section: @@ -887,4 +887,4 @@ dataSources: - `List`: Specifies a list of items. Each item needs to specify its type and data. - `BigInt`: Specifies a large integer value. Must be quoted due to its large size. -This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 98ba8e797456d937ebe3cf5cfa5e23f05903f4dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:38 -0500 Subject: [PATCH 0144/1789] New translations api.mdx (Czech) --- .../developing/creating/graph-ts/api.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/cs/subgraphs/developing/creating/graph-ts/api.mdx index 3c3dbdc7671f..e794c1caa32c 100644 --- a/website/src/pages/cs/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/cs/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: AssemblyScript API --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` +- Code generated from Subgraph files by `graph codegen` You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). 
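The Crypto API noted above exposes `crypto.keccak256`, which hashes a `ByteArray`. A small sketch of typical usage, with an arbitrary example input string:

```typescript
import { ByteArray, crypto, log } from '@graphprotocol/graph-ts'

export function exampleKeccak(): void {
  // keccak256 takes a ByteArray and returns the 32-byte hash as a ByteArray
  let input = ByteArray.fromUTF8('transfer(address,uint256)')
  let hash = crypto.keccak256(input)
  // The first four bytes of this particular hash are 0xa9059cbb,
  // the ERC-20 `transfer` function selector
  log.info('keccak256 of {}: {}', ['transfer(address,uint256)', hash.toHexString()])
}
```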
@@ -27,18 +27,18 @@ Knihovna `@graphprotocol/graph-ts` poskytuje následující API: ### Verze -`apiVersion` v manifestu podgrafu určuje verzi mapovacího API, kterou pro daný podgraf používá uzel Graf. +The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| Verze | Poznámky vydání | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Přidá ověření existence polí ve schéma při ukládání entity. | -| 0.0.7 | Přidání tříd `TransactionReceipt` a `Log` do typů Ethereum
Přidání pole `receipt` do objektu Ethereum událost | -| 0.0.6 | Přidáno pole `nonce` do objektu Ethereum Transaction
Přidáno `baseFeePerGas` do objektu Ethereum bloku | +| Verze | Poznámky vydání | +| :---: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Přidá ověření existence polí ve schéma při ukládání entity. | +| 0.0.7 | Přidání tříd `TransactionReceipt` a `Log` do typů Ethereum
Přidání pole `receipt` do objektu Ethereum událost | +| 0.0.6 | Přidáno pole `nonce` do objektu Ethereum Transaction
Přidáno `baseFeePerGas` do objektu Ethereum bloku | | 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Přidání pole `functionSignature` do objektu Ethereum SmartContractCall | -| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Přidání pole `input` do objektu Ethereum Transackce | +| 0.0.4 | Přidání pole `functionSignature` do objektu Ethereum SmartContractCall | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Přidání pole `input` do objektu Ethereum Transackce | ### Vestavěné typy @@ -147,7 +147,7 @@ _Math_ - `x.notEqual(y: BigInt): bool` –lze zapsat jako `x != y`. - `x.lt(y: BigInt): bool` – lze zapsat jako `x < y`. - `x.le(y: BigInt): bool` – lze zapsat jako `x <= y`. -- `x.gt(y: BigInt): bool` – lze zapsat jako `x > y`. +- `x.gt(y: BigInt): bool` – lze zapsat jako `x > y`. - `x.ge(y: BigInt): bool` – lze zapsat jako `x >= y`. - `x.neg(): BigInt` – lze zapsat jako `-x`. - `x.divDecimal(y: BigDecimal): BigDecimal` – dělí desetinným číslem, čímž získá desetinný výsledek. @@ -223,7 +223,7 @@ import { store } from '@graphprotocol/graph-ts' API `store` umožňuje načítat, ukládat a odebírat entity z a do úložiště Graf uzel. -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### Vytváření entity @@ -282,8 +282,8 @@ Od verzí `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 a `@graphproto The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // or however the ID is constructed @@ -380,11 +380,11 @@ Ethereum API poskytuje přístup k inteligentním smlouvám, veřejným stavový #### Podpora typů Ethereum -Stejně jako u entit generuje `graph codegen` třídy pro všechny inteligentní smlouvy a události používané v podgrafu. Za tímto účelem musí být ABI kontraktu součástí zdroje dat v manifestu podgrafu. Obvykle jsou soubory ABI uloženy ve složce `abis/`. +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. 
For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -Ve vygenerovaných třídách probíhají konverze mezi typy Ethereum [built-in-types](#built-in-types) v pozadí, takže se o ně autoři podgraf nemusí starat. +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. -To ilustruje následující příklad. Je dáno schéma podgrafu, jako je +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +483,7 @@ class Log { #### Přístup ke stavu inteligentní smlouvy -Kód vygenerovaný nástrojem `graph codegen` obsahuje také třídy pro inteligentní smlouvy používané v podgrafu. Ty lze použít k přístupu k veřejným stavovým proměnným a k volání funkcí kontraktu v aktuálním bloku. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. Běžným vzorem je přístup ke smlouvě, ze které událost pochází. Toho lze dosáhnout pomocí následujícího kódu: @@ -506,7 +506,7 @@ export function handleTransfer(event: TransferEvent) { Pokud má smlouva `ERC20Contract` na platformě Ethereum veřejnou funkci pouze pro čtení s názvem `symbol`, lze ji volat pomocí `.symbol()`. Pro veřejné stavové proměnné se automaticky vytvoří metoda se stejným názvem. -Jakákoli jiná smlouva, která je součástí podgrafu, může být importována z vygenerovaného kódu a může být svázána s platnou adresou. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### Zpracování vrácených volání @@ -582,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // returns false import { log } from '@graphprotocol/graph-ts' ``` -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. `log` API obsahuje následující funkce: @@ -590,7 +590,7 @@ The `log` API allows subgraphs to log information to the Graph Node standard out - `log.info(fmt: string, args: Array): void` - zaznamená informační zprávu. - `log.warning(fmt: string, args: Array): void` - zaznamená varování. - `log.error(fmt: string, args: Array): void` - zaznamená chybovou zprávu. -- `log.critical(fmt: string, args: Array): void` - zaznamená kritickou zprávu _a_ ukončí podgraf. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. `log` API přebírá formátovací řetězec a pole řetězcových hodnot. Poté nahradí zástupné symboly řetězcovými hodnotami z pole. První zástupný symbol „{}“ bude nahrazen první hodnotou v poli, druhý zástupný symbol „{}“ bude nahrazen druhou hodnotou a tak dále. @@ -721,7 +721,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) V současné době je podporován pouze příznak `json`, který musí být předán souboru `ipfs.map`. 
S příznakem `json` se soubor IPFS musí skládat z řady hodnot JSON, jedna hodnota na řádek. Volání příkazu `ipfs.map` přečte každý řádek souboru, deserializuje jej do hodnoty `JSONValue` a pro každou z nich zavolá zpětné volání. Zpětné volání pak může použít operace entit k uložení dat z `JSONValue`. Změny entit se uloží až po úspěšném ukončení obsluhy, která volala `ipfs.map`; do té doby se uchovávají v paměti, a velikost souboru, který může `ipfs.map` zpracovat, je proto omezená. -Při úspěchu vrátí `ipfs.map` hodnotu `void`. Pokud vyvolání zpětného volání způsobí chybu, obslužná rutina, která vyvolala `ipfs.map`, se přeruší a podgraf se označí jako neúspěšný. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### Crypto API @@ -836,7 +836,7 @@ Základní třída `Entity` a podřízená třída `DataSourceContext` mají pom ### DataSourceContext v manifestu -Sekce `context` v rámci `dataSources` umožňuje definovat páry klíč-hodnota, které jsou přístupné v rámci mapování podgrafů. Dostupné typy jsou `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List` a `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Zde je příklad YAML ilustrující použití různých typů v sekci `context`: @@ -887,4 +887,4 @@ dataSources: - `Seznam`: Určuje seznam položek. U každé položky je třeba zadat její typ a data. - `BigInt`: Určuje velkou celočíselnou hodnotu. Kvůli velké velikosti musí být uvedena v uvozovkách. -Tento kontext je pak přístupný v souborech mapování podgrafů, což umožňuje vytvářet dynamičtější a konfigurovatelnější podgrafy. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 17a02efb4f1199956a2431a53839e00c079d5662 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:39 -0500 Subject: [PATCH 0145/1789] New translations api.mdx (German) --- .../developing/creating/graph-ts/api.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/de/subgraphs/developing/creating/graph-ts/api.mdx index 6106b8cdf0dc..0aa9389416a9 100644 --- a/website/src/pages/de/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: AssemblyScript API --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: +Learn what built-in APIs can be used when writing Subgraph mappings. 
There are two kinds of APIs available out of the box: - The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` +- Code generated from Subgraph files by `graph codegen` You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). @@ -27,18 +27,18 @@ The `@graphprotocol/graph-ts` library provides the following APIs: ### Versionen -The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. +The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| Version | Release notes | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| Version | Release notes | +| :-----: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Built-in Types @@ -223,7 +223,7 @@ import { store } from '@graphprotocol/graph-ts' The `store` API allows to load, save and remove entities from and to the Graph Node store. -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### Creating entities @@ -282,8 +282,8 @@ As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotoco The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // or however the ID is constructed @@ -380,11 +380,11 @@ The Ethereum API provides access to smart contracts, public state variables, con #### Support for Ethereum Types -As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. 
-The following example illustrates this. Given a subgraph schema like +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +483,7 @@ class Log { #### Access to Smart Contract State -The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. A common pattern is to access the contract from which an event originates. This is achieved with the following code: @@ -506,7 +506,7 @@ export function handleTransfer(event: TransferEvent) { As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. -Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### Handling Reverted Calls @@ -582,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // returns false import { log } from '@graphprotocol/graph-ts' ``` -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. The `log` API includes the following functions: @@ -590,7 +590,7 @@ The `log` API includes the following functions: - `log.info(fmt: string, args: Array): void` - logs an informational message. - `log.warning(fmt: string, args: Array): void` - logs a warning. - `log.error(fmt: string, args: Array): void` - logs an error message. -- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. @@ -721,7 +721,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. -On success, `ipfs.map` returns `void`. 
If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### Crypto API @@ -836,7 +836,7 @@ The base `Entity` class and the child `DataSourceContext` class have helpers to ### DataSourceContext in Manifest -The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Here is a YAML example illustrating the usage of various types in the `context` section: @@ -887,4 +887,4 @@ dataSources: - `List`: Specifies a list of items. Each item needs to specify its type and data. - `BigInt`: Specifies a large integer value. Must be quoted due to its large size. -This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 3578d24aa267452103e1271c5fc197621af44090 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:40 -0500 Subject: [PATCH 0146/1789] New translations api.mdx (Italian) --- .../developing/creating/graph-ts/api.mdx | 130 +++++++++--------- 1 file changed, 65 insertions(+), 65 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/it/subgraphs/developing/creating/graph-ts/api.mdx index 1d6fa48848b3..fb87d521d968 100644 --- a/website/src/pages/it/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/it/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: API AssemblyScript --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` +- Code generated from Subgraph files by `graph codegen` You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). @@ -27,18 +27,18 @@ La libreria `@graphprotocol/graph-ts` fornisce le seguenti API: ### Versioni -La `apiVersion` nel manifest del subgraph specifica la versione dell'API di mappatura che viene eseguita da the Graph Node per un dato subgraph. 
+The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| Versione | Note di rilascio | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Aggiunte le classi `TransactionReceipt` e `Log` ai tipi di Ethereum
Aggiunto il campo `receipt` all'oggetto Ethereum Event | -| 0.0.6 | Aggiunto il campo `nonce` all'oggetto Ethereum Transaction
Aggiunto `baseFeePerGas` all'oggetto Ethereum Block | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Aggiunto il campo `functionSignature` all'oggetto Ethereum SmartContractCall | -| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Aggiunto il campo `input` all'oggetto Ethereum Transaction | +| Versione | Note di rilascio | +| :------: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Aggiunte le classi `TransactionReceipt` e `Log` ai tipi di Ethereum
Aggiunto il campo `receipt` all'oggetto Ethereum Event | +| 0.0.6 | Aggiunto il campo `nonce` all'oggetto Ethereum Transaction
Aggiunto `baseFeePerGas` all'oggetto Ethereum Block | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Aggiunto il campo `functionSignature` all'oggetto Ethereum SmartContractCall | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Aggiunto il campo `input` all'oggetto Ethereum Transaction | ### Tipi integrati @@ -223,7 +223,7 @@ import { store } from '@graphprotocol/graph-ts' L'API `store` consente di caricare, salvare e rimuovere entità da e verso il Graph Node store. -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### Creazione di entità @@ -282,8 +282,8 @@ A partire da `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 e `@graphpr The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // or however the ID is constructed @@ -380,11 +380,11 @@ L'API di Ethereum fornisce l'accesso agli smart contract, alle variabili di stat #### Supporto per i tipi di Ethereum -Come per le entità, `graph codegen` genera classi per tutti gli smart contract e gli eventi utilizzati in un subgraph. Per questo, gli ABI dei contratti devono far parte dell'origine dati nel manifest del subgraph. In genere, i file ABI sono memorizzati in una cartella `abis/`. +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -Con le classi generate, le conversioni tra i tipi di Ethereum e i [tipi incorporati](#built-in-types) avvengono dietro le quinte, in modo che gli autori dei subgraph non debbano preoccuparsene. 
+With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. -L'esempio seguente lo illustra. Dato uno schema di subgraph come +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +483,7 @@ class Log { #### Accesso allo stato dello smart contract -Il codice generato da `graph codegen` include anche classi per gli smart contract utilizzati nel subgraph. Queste possono essere utilizzate per accedere alle variabili di stato pubbliche e per chiamare le funzioni del contratto nel blocco corrente. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. Un modello comune è quello di accedere al contratto da cui proviene un evento. Questo si ottiene con il seguente codice: @@ -506,7 +506,7 @@ export function handleTransfer(event: TransferEvent) { Finché il `ERC20Contract` su Ethereum ha una funzione pubblica di sola lettura chiamata `symbol`, questa può essere chiamata con `.symbol()`. Per le variabili di stato pubbliche viene creato automaticamente un metodo con lo stesso nome. -Qualsiasi altro contratto che faccia parte del subgraph può essere importato dal codice generato e può essere legato a un indirizzo valido. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### Gestione delle chiamate annullate @@ -582,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // returns false import { log } from '@graphprotocol/graph-ts' ``` -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. L'API `log` include le seguenti funzioni: @@ -590,7 +590,7 @@ L'API `log` include le seguenti funzioni: - `log.info(fmt: string, args: Array): void` - registra un messaggio informativo. - `log.warning(fmt: string, args: Array): void` - registra un avviso. - `log.error(fmt: string, args: Array): void` - registra un messaggio di errore. -- `log.critical(fmt: string, args: Array): void` - registra un messaggio critico _and_ termina il subgraph. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. L'API `log` accetta una stringa di formato e un array di valori stringa. Quindi sostituisce i segnaposto con i valori stringa dell'array. Il primo segnaposto `{}` viene sostituito dal primo valore dell'array, il secondo segnaposto `{}` viene sostituito dal secondo valore e così via. @@ -721,7 +721,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) L'unico flag attualmente supportato è `json`, che deve essere passato a `ipfs.map`. Con il flag `json`, il file IPFS deve essere costituito da una serie di valori JSON, un valore per riga. La chiamata a `ipfs.map` leggerà ogni riga del file, la deserializzerà in un `JSONValue` e chiamerà il callback per ognuno di essi. 
Il callback può quindi utilizzare le operazioni sulle entità per memorizzare i dati dal `JSONValue`. Le modifiche alle entità vengono memorizzate solo quando il gestore che ha chiamato `ipfs.map` termina con successo; nel frattempo, vengono mantenute in memoria e la dimensione del file che `ipfs.map` può elaborare è quindi limitata. -In caso di successo, `ipfs.map` restituisce `void`. Se una qualsiasi invocazione del callback causa un errore, il gestore che ha invocato `ipfs.map` viene interrotto e il subgraph viene contrassegnato come fallito. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### Crypto API @@ -770,44 +770,44 @@ Quando il tipo di un valore è certo, può essere convertito in un [tipo incorpo ### Riferimento alle conversioni di tipo -| Fonte(i) | Destinazione | Funzione di conversione | -| -------------------- | -------------------- | --------------------------- | -| Address | Bytes | none | -| Address | String | s.toHexString() | -| BigDecimal | String | s.toString() | -| BigInt | BigDecimal | s.toBigDecimal() | -| BigInt | String (hexadecimal) | s.toHexString() o s.toHex() | -| BigInt | String (unicode) | s.toString() | -| BigInt | i32 | s.toI32() | -| Boolean | Boolean | none | -| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | -| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | -| Bytes | String (hexadecimal) | s.toHexString() o s.toHex() | -| Bytes | String (unicode) | s.toString() | -| Bytes | String (base58) | s.toBase58() | -| Bytes | i32 | s.toI32() | -| Bytes | u32 | s.toU32() | -| Bytes | JSON | json.fromBytes(s) | -| int8 | i32 | none | -| int32 | i32 | none | -| int32 | BigInt | BigInt.fromI32(s) | -| uint24 | i32 | none | -| int64 - int256 | BigInt | none | -| uint32 - uint256 | BigInt | none | -| JSON | boolean | s.toBool() | -| JSON | i64 | s.toI64() | -| JSON | u64 | s.toU64() | -| JSON | f64 | s.toF64() | -| JSON | BigInt | s.toBigInt() | -| JSON | string | s.toString() | -| JSON | Array | s.toArray() | -| JSON | Object | s.toObject() | -| String | Address | Address.fromString(s) | -| Bytes | Address | Address.fromBytes(s) | -| String | BigInt | BigInt.fromString(s) | -| String | BigDecimal | BigDecimal.fromString(s) | -| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | -| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | +| Fonte(i) | Destinazione | Funzione di conversione | +| -------------------- | --------------------- | -------------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() o s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() o s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| 
JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | ### Metadati della Data Source @@ -836,7 +836,7 @@ La classe base `Entity` e la classe figlia `DataSourceContext` hanno degli helpe ### DataSourceContext nel manifesto -La sezione `contesto` all'interno di `dataSources` consente di definire coppie chiave-valore accessibili nelle mappature dei subgraph. I tipi disponibili sono `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List` e `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Ecco un esempio YAML che illustra l'uso di vari tipi nella sezione `context`: @@ -887,4 +887,4 @@ dataSources: - `List`: Specifica un elenco di elementi. Ogni elemento deve specificare il suo tipo e i suoi dati. - `BigInt`: Specifica un valore intero di grandi dimensioni. Deve essere quotato a causa delle sue grandi dimensioni. -Questo contesto è quindi accessibile nei file di mappatura dei subgraph, consentendo di ottenere subgraph più dinamici e configurabili. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From e598785c6ea2394d1ede78857b90641c4c77894c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:41 -0500 Subject: [PATCH 0147/1789] New translations api.mdx (Japanese) --- .../developing/creating/graph-ts/api.mdx | 130 +++++++++--------- 1 file changed, 65 insertions(+), 65 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/ja/subgraphs/developing/creating/graph-ts/api.mdx index c9d5c8a3ba47..b9e5cace8281 100644 --- a/website/src/pages/ja/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/ja/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: AssemblyScript API --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` +- Code generated from Subgraph files by `graph codegen` You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). 
@@ -27,18 +27,18 @@ Since language mappings are written in AssemblyScript, it is useful to review th ### バージョン -サブグラフマニフェストapiVersionは、特定のサブグラフのマッピングAPIバージョンを指定します。このバージョンは、Graph Nodeによって実行されます。 +The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| バージョン | リリースノート | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Ethereum タイプに `TransactionReceipt` と `Log` クラスを追加
Ethereum Event オブジェクトに `receipt` フィールドを追加。 | -| 0.0.6 | Ethereum Transactionオブジェクトに`nonce`フィールドを追加
Ethereum Blockオブジェクトに`baseFeePerGas`を追加。 | +| バージョン | リリースノート | +| :---: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Ethereum タイプに `TransactionReceipt` と `Log` クラスを追加
Ethereum Event オブジェクトに `receipt` フィールドを追加。 | +| 0.0.6 | Ethereum Transactionオブジェクトに`nonce`フィールドを追加
Ethereum Blockオブジェクトに`baseFeePerGas`を追加。 | | 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Ethereum SmartContractCall オブジェクトにfunctionSignatureフィールドを追加 | -| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Ethereum Transaction オブジェクトに inputフィールドを追加 | +| 0.0.4 | Ethereum SmartContractCall オブジェクトにfunctionSignatureフィールドを追加 | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Ethereum Transaction オブジェクトに inputフィールドを追加 | ### 組み込み型 @@ -223,7 +223,7 @@ Bytesの API の上に以下のメソッドを追加しています。 Store API は、グラフノードのストアにエンティティを読み込んだり、保存したり、削除したりすることができます。 -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### エンティティの作成 @@ -282,11 +282,11 @@ graph-node v0.31.0、@graphprotocol/graph-ts v0.30.0、および @graphprotocol/ The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript -let id = event.transaction.hash // または ID が構築される方法 +let id =event.transaction.hash // または ID が構築される方法 let transfer = Transfer.loadInBlock(id) if (transfer == null) { transfer = 新しい転送(id) @@ -380,11 +380,11 @@ Ethereum API は、スマートコントラクト、パブリックステート #### Ethereum タイプのサポート -エンティティと同様に、graph codegenは、サブグラフで使用されるすべてのスマートコントラクトとイベントのためのクラスを生成します。 このためには、コントラクト ABI がサブグラフマニフェストのデータソースの一部である必要があります。 通常、ABI ファイルはabis/フォルダに格納されています。 +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -生成されたクラスでは、Ethereum Typeと[built-in types](#built-in-types)間の変換が舞台裏で行われるため、サブグラフ作成者はそれらを気にする必要がありません。 +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. -以下の例で説明します。 以下のようなサブグラフのスキーマが与えられます。 +The following example illustrates this. 
Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +483,7 @@ class Log { #### スマートコントラクトの状態へのアクセス -graph codegenが生成するコードには、サブグラフで使用されるスマートコントラクトのクラスも含まれています。 これらを使って、パブリックな状態変数にアクセスしたり、現在のブロックにあるコントラクトの関数を呼び出したりすることができます。 +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. よくあるパターンは、イベントが発生したコントラクトにアクセスすることです。 これは以下のコードで実現できます。 @@ -506,7 +506,7 @@ Transferは、エンティティタイプとの名前の衝突を避けるため Ethereum の ERC20Contractにsymbolというパブリックな読み取り専用の関数があれば、.symbol()で呼び出すことができます。 パブリックな状態変数については、同じ名前のメソッドが自動的に作成されます。 -サブグラフの一部である他のコントラクトは、生成されたコードからインポートすることができ、有効なアドレスにバインドすることができます。 +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### リバートされた呼び出しの処理 @@ -582,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // returns false '@graphprotocol/graph-ts'から{ log } をインポートします。 ``` -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. log API には以下の機能があります: @@ -590,7 +590,7 @@ log API には以下の機能があります: - `log.info(fmt: string, args: Array): void` - インフォメーションメッセージを記録します。 - `log.warning(fmt: string, args: Array): void` - 警告メッセージを記録します。 - `log.error(fmt: string, args: Array): void` - エラーメッセージを記録します。 -- `log.critical(fmt: string, args: Array): void` - クリティカル・メッセージを記録して、サブグラフを終了します。 +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. log API は、フォーマット文字列と文字列値の配列を受け取ります。 そして、プレースホルダーを配列の文字列値で置き換えます。 最初の{}プレースホルダーは配列の最初の値に置き換えられ、2 番目の{}プレースホルダーは 2 番目の値に置き換えられ、以下のようになります。 @@ -721,7 +721,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) 現在サポートされているフラグは `json` だけで、これは `ipfs.map` に渡さなければなりません。json` フラグを指定すると、IPFS ファイルは一連の JSON 値で構成されます。ipfs.map` を呼び出すと、ファイルの各行を読み込んで `JSONValue` にデシリアライズし、それぞれのコールバックを呼び出します。コールバックは `JSONValue` からデータを格納するためにエンティティ操作を使用することができます。エンティティの変更は、`ipfs.map` を呼び出したハンドラが正常に終了したときのみ保存されます。その間はメモリ上に保持されるので、`ipfs.map` が処理できるファイルのサイズは制限されます。 -成功すると,ipfs.mapは voidを返します。 コールバックの呼び出しでエラーが発生した場合、ipfs.mapを呼び出したハンドラは中止され、サブグラフは失敗とマークされます。 +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. 
### Crypto API @@ -770,44 +770,44 @@ if (value.kind == JSONValueKind.BOOL) { ### タイプ 変換参照 -| Source(s) | Destination | Conversion function | -| -------------------- | -------------------- | ---------------------------- | -| Address | Bytes | none | -| Address | String | s.toHexString() | -| BigDecimal | String | s.toString() | -| BigInt | BigDecimal | s.toBigDecimal() | -| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | -| BigInt | String (unicode) | s.toString() | -| BigInt | i32 | s.toI32() | -| Boolean | Boolean | none | -| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | -| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | -| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | -| Bytes | String (unicode) | s.toString() | -| Bytes | String (base58) | s.toBase58() | -| Bytes | i32 | s.toI32() | -| Bytes | u32 | s.toU32() | -| Bytes | JSON | json.fromBytes(s) | -| int8 | i32 | none | -| int32 | i32 | none | -| int32 | BigInt | Bigint.fromI32(s) | -| uint24 | i32 | none | -| int64 - int256 | BigInt | none | -| uint32 - uint256 | BigInt | none | -| JSON | boolean | s.toBool() | -| JSON | i64 | s.toI64() | -| JSON | u64 | s.toU64() | -| JSON | f64 | s.toF64() | -| JSON | BigInt | s.toBigInt() | -| JSON | string | s.toString() | -| JSON | Array | s.toArray() | -| JSON | Object | s.toObject() | -| String | Address | Address.fromString(s) | -| Bytes | Address | Address.fromString(s) | -| String | BigInt | BigDecimal.fromString(s) | -| String | BigDecimal | BigDecimal.fromString(s) | -| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | -| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | -------------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | Bigint.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromString(s) | +| String | BigInt | BigDecimal.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | ### データソースのメタデータ @@ -836,7 +836,7 @@ if (value.kind == JSONValueKind.BOOL) { ### マニフェスト内のDataSourceContext -DataSources`の`context`セクションでは、サブグラフマッピング内でアクセス可能なキーと値のペアを定義することができます。使用可能な型は`Bool`、`String`、`Int`、`Int8`、`BigDecimal`、`Bytes`、`List`、`BigInt\` です。 +The `context` section within `dataSources` allows you to define 
key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. 以下は `context` セクションのさまざまな型の使い方を示す YAML の例です: @@ -887,4 +887,4 @@ dataSources: - `List`: Specifies a list of items. Each item needs to specify its type and data. - `BigInt`: Specifies a large integer value. Must be quoted due to its large size. -このコンテキストは、サブグラフのマッピング・ファイルからアクセスでき、よりダイナミックで設定可能なサブグラフを実現します。 +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 387958d5f284c9f7103f0cdf7a38717eafc52231 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:42 -0500 Subject: [PATCH 0148/1789] New translations api.mdx (Korean) --- .../developing/creating/graph-ts/api.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/ko/subgraphs/developing/creating/graph-ts/api.mdx index 35bb04826c98..2e256ae18190 100644 --- a/website/src/pages/ko/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/ko/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: AssemblyScript API --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` +- Code generated from Subgraph files by `graph codegen` You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). @@ -27,18 +27,18 @@ The `@graphprotocol/graph-ts` library provides the following APIs: ### Versions -The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. +The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| Version | Release notes | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| Version | Release notes | +| :-----: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Built-in Types @@ -223,7 +223,7 @@ import { store } from '@graphprotocol/graph-ts' The `store` API allows to load, save and remove entities from and to the Graph Node store. -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### Creating entities @@ -282,8 +282,8 @@ As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotoco The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // or however the ID is constructed @@ -380,11 +380,11 @@ The Ethereum API provides access to smart contracts, public state variables, con #### Support for Ethereum Types -As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. 
-The following example illustrates this. Given a subgraph schema like +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +483,7 @@ class Log { #### Access to Smart Contract State -The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. A common pattern is to access the contract from which an event originates. This is achieved with the following code: @@ -506,7 +506,7 @@ export function handleTransfer(event: TransferEvent) { As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. -Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### Handling Reverted Calls @@ -582,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // returns false import { log } from '@graphprotocol/graph-ts' ``` -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. The `log` API includes the following functions: @@ -590,7 +590,7 @@ The `log` API includes the following functions: - `log.info(fmt: string, args: Array): void` - logs an informational message. - `log.warning(fmt: string, args: Array): void` - logs a warning. - `log.error(fmt: string, args: Array): void` - logs an error message. -- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. @@ -721,7 +721,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. -On success, `ipfs.map` returns `void`. 
If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### Crypto API @@ -836,7 +836,7 @@ The base `Entity` class and the child `DataSourceContext` class have helpers to ### DataSourceContext in Manifest -The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Here is a YAML example illustrating the usage of various types in the `context` section: @@ -887,4 +887,4 @@ dataSources: - `List`: Specifies a list of items. Each item needs to specify its type and data. - `BigInt`: Specifies a large integer value. Must be quoted due to its large size. -This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 0ae6aec82df4684af873070d39379f0387e45e24 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:44 -0500 Subject: [PATCH 0149/1789] New translations api.mdx (Dutch) --- .../developing/creating/graph-ts/api.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/nl/subgraphs/developing/creating/graph-ts/api.mdx index 35bb04826c98..2e256ae18190 100644 --- a/website/src/pages/nl/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/nl/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: AssemblyScript API --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` +- Code generated from Subgraph files by `graph codegen` You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). @@ -27,18 +27,18 @@ The `@graphprotocol/graph-ts` library provides the following APIs: ### Versions -The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. 
+The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| Version | Release notes | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object |
-| 0.0.6 | Added `nonce` field to the Ethereum Transaction object<br />Added `baseFeePerGas` to the Ethereum Block object |
-| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
-| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object |
-| 0.0.3 | Added `from` field to the Ethereum Call object<br />
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| Version | Release notes | +| :-----: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Built-in Types @@ -223,7 +223,7 @@ import { store } from '@graphprotocol/graph-ts' The `store` API allows to load, save and remove entities from and to the Graph Node store. -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### Creating entities @@ -282,8 +282,8 @@ As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotoco The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // or however the ID is constructed @@ -380,11 +380,11 @@ The Ethereum API provides access to smart contracts, public state variables, con #### Support for Ethereum Types -As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. 
-The following example illustrates this. Given a subgraph schema like +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +483,7 @@ class Log { #### Access to Smart Contract State -The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. A common pattern is to access the contract from which an event originates. This is achieved with the following code: @@ -506,7 +506,7 @@ export function handleTransfer(event: TransferEvent) { As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. -Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### Handling Reverted Calls @@ -582,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // returns false import { log } from '@graphprotocol/graph-ts' ``` -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. The `log` API includes the following functions: @@ -590,7 +590,7 @@ The `log` API includes the following functions: - `log.info(fmt: string, args: Array): void` - logs an informational message. - `log.warning(fmt: string, args: Array): void` - logs a warning. - `log.error(fmt: string, args: Array): void` - logs an error message. -- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. @@ -721,7 +721,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. -On success, `ipfs.map` returns `void`. 
If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### Crypto API @@ -836,7 +836,7 @@ The base `Entity` class and the child `DataSourceContext` class have helpers to ### DataSourceContext in Manifest -The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Here is a YAML example illustrating the usage of various types in the `context` section: @@ -887,4 +887,4 @@ dataSources: - `List`: Specifies a list of items. Each item needs to specify its type and data. - `BigInt`: Specifies a large integer value. Must be quoted due to its large size. -This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 6d48dabde2fc4bc1bc88b07ae83f5dcf956a1939 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:45 -0500 Subject: [PATCH 0150/1789] New translations api.mdx (Polish) --- .../developing/creating/graph-ts/api.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/pl/subgraphs/developing/creating/graph-ts/api.mdx index 35bb04826c98..2e256ae18190 100644 --- a/website/src/pages/pl/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/pl/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: AssemblyScript API --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` +- Code generated from Subgraph files by `graph codegen` You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). @@ -27,18 +27,18 @@ The `@graphprotocol/graph-ts` library provides the following APIs: ### Versions -The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. 
+The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| Version | Release notes | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object |
-| 0.0.6 | Added `nonce` field to the Ethereum Transaction object<br />Added `baseFeePerGas` to the Ethereum Block object |
-| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
-| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object |
-| 0.0.3 | Added `from` field to the Ethereum Call object<br />
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| Version | Release notes | +| :-----: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Built-in Types @@ -223,7 +223,7 @@ import { store } from '@graphprotocol/graph-ts' The `store` API allows to load, save and remove entities from and to the Graph Node store. -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### Creating entities @@ -282,8 +282,8 @@ As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotoco The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // or however the ID is constructed @@ -380,11 +380,11 @@ The Ethereum API provides access to smart contracts, public state variables, con #### Support for Ethereum Types -As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. 
-The following example illustrates this. Given a subgraph schema like +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +483,7 @@ class Log { #### Access to Smart Contract State -The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. A common pattern is to access the contract from which an event originates. This is achieved with the following code: @@ -506,7 +506,7 @@ export function handleTransfer(event: TransferEvent) { As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. -Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### Handling Reverted Calls @@ -582,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // returns false import { log } from '@graphprotocol/graph-ts' ``` -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. The `log` API includes the following functions: @@ -590,7 +590,7 @@ The `log` API includes the following functions: - `log.info(fmt: string, args: Array): void` - logs an informational message. - `log.warning(fmt: string, args: Array): void` - logs a warning. - `log.error(fmt: string, args: Array): void` - logs an error message. -- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. @@ -721,7 +721,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. -On success, `ipfs.map` returns `void`. 
If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### Crypto API @@ -836,7 +836,7 @@ The base `Entity` class and the child `DataSourceContext` class have helpers to ### DataSourceContext in Manifest -The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Here is a YAML example illustrating the usage of various types in the `context` section: @@ -887,4 +887,4 @@ dataSources: - `List`: Specifies a list of items. Each item needs to specify its type and data. - `BigInt`: Specifies a large integer value. Must be quoted due to its large size. -This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 3e82046c4d10ab2b1be2d10d7b33678b81206d52 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:46 -0500 Subject: [PATCH 0151/1789] New translations api.mdx (Portuguese) --- .../developing/creating/graph-ts/api.mdx | 133 +++++++++--------- 1 file changed, 67 insertions(+), 66 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/pt/subgraphs/developing/creating/graph-ts/api.mdx index c9069e51a627..347c3fbcd61f 100644 --- a/website/src/pages/pt/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/pt/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: API AssemblyScript --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` +- Code generated from Subgraph files by `graph codegen` You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). @@ -27,18 +27,18 @@ A biblioteca `@graphprotocol/graph-ts` fornece as seguintes APIs: ### Versões -No manifest do subgraph, `apiVersion` especifica a versão da API de mapeamento, executada pelo Graph Node para um subgraph. 
+The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| Versão | Notas de atualização | -| :-: | --- | -| 0.0.9 | Adiciona novas funções de host [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adiciona validação para existência de campos no schema ao salvar uma entidade. | -| 0.0.7 | Classes `TransactionReceipt` e `Log` adicionadas aos tipos do EthereumCampo
Campo `receipt` adicionado ao objeto Ethereum Event |
-| 0.0.6 | Campo `nonce` adicionado ao objeto Ethereum Transaction<br />Campo `baseFeePerGas` adicionado ao objeto Ethereum Block |
-| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
-| 0.0.4 | Campo `functionSignature` adicionado ao objeto Ethereum SmartContractCall |
-| 0.0.3 | Added `from` field to the Ethereum Call object<br />
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Campo `input` adicionado ao objeto Ethereum Transaction | +| Versão | Notas de atualização | +| :----: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adiciona novas funções de host [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adiciona validação para existência de campos no schema ao salvar uma entidade. | +| 0.0.7 | Classes `TransactionReceipt` e `Log` adicionadas aos tipos do EthereumCampo
Campo `receipt` adicionado ao objeto Ethereum Event | +| 0.0.6 | Campo `nonce` adicionado ao objeto Ethereum TransactionCampo
`baseFeePerGas` adicionado ao objeto Ethereum Block | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Campo `functionSignature` adicionado ao objeto Ethereum SmartContractCall | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Campo `input` adicionado ao objeto Ethereum Transaction | ### Tipos Embutidos @@ -166,7 +166,8 @@ _Matemática_ import { TypedMap } from '@graphprotocol/graph-ts' ``` -O `TypedMap` pode servir para armazenar pares de chave e valor (key e value ). Confira [este exemplo](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). +O `TypedMap` pode servir para armazenar pares de chave e valor (key e value +). Confira [este exemplo](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). A classe `TypedMap` tem a seguinte API: @@ -223,7 +224,7 @@ import { store } from '@graphprotocol/graph-ts' A API `store` permite carregar, salvar e remover entidades do/para o armazenamento do Graph Node. -As entidades escritas no armazenamento mapeam um-por-um com os tipos de `@entity` definidos no schema GraphQL do subgraph. Para trabalhar com estas entidades de forma conveniente, o comando `graph codegen` fornecido pelo [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) gera classes de entidades, que são subclasses do tipo embutido `Entity`, com getters e setters de propriedade para os campos no schema e métodos para carregar e salvar estas entidades. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### Como criar entidades @@ -282,8 +283,8 @@ Desde o `graph-node` v0.31.0, o `@graphprotocol/graph-ts` v0.30.0 e o `@graphpro The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // ou como a ID for construída @@ -380,11 +381,11 @@ A API do Ethereum fornece acesso a contratos inteligentes, variáveis de estado #### Apoio para Tipos no Ethereum -Assim como em entidades, o `graph codegen` gera classes para todos os contratos inteligentes e eventos usados em um subgraph. Para isto, as ABIs dos contratos devem ser parte da fonte de dados no manifest do subgraph. 
Tipicamente, os arquivos da ABI são armazenados em uma pasta `abis/`. +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -Com as classes geradas, conversões entre tipos no Ethereum e os [tipos embutidos](#built-in-types) acontecem em segundo plano para que os autores de subgraphs não precisem se preocupar com elas. +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. -Veja um exemplo a seguir. Considerando um schema de subgraph como +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +484,7 @@ class Log { #### Acesso ao Estado do Contrato Inteligente -O código gerado pelo `graph codegen` também inclui classes para os contratos inteligentes usados no subgraph. Estes servem para acessar variáveis de estado público e funções de chamada do contrato no bloco atual. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. É comum acessar o contrato de qual origina um evento. Isto é feito com o seguinte código: @@ -506,7 +507,7 @@ O `Transfer` é apelidado de `TransferEvent` aqui para evitar confusões de nome Enquanto o `ERC20Contract` no Ethereum tiver uma função pública de apenas-leitura chamada `symbol`, ele pode ser chamado com o `.symbol()`. Para variáveis de estado público, um método com o mesmo nome é criado automaticamente. -Qualquer outro contrato que seja parte do subgraph pode ser importado do código gerado e ligado a um endereço válido. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### Como Lidar com Chamadas Revertidas @@ -582,7 +583,7 @@ let isContract = ethereum.hasCode(eoa).inner // retorna false import { log } from '@graphprotocol/graph-ts' ``` -A API `log` permite que os subgraphs gravem informações à saída padrão do Graph Node, assim como ao Graph Explorer. Mensagens podem ser gravadas com níveis diferentes de log. É fornecida uma sintaxe básica de formatação de strings para compor mensagens de log do argumento. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. A API `log` inclui as seguintes funções: @@ -590,7 +591,7 @@ A API `log` inclui as seguintes funções: - `log.info(fmt: string, args: Array): void` - loga uma mensagem de debug. - `log.warning(fmt: string, args: Array): void` - loga um aviso. - `log.error(fmt: string, args: Array): void` - loga uma mensagem de erro. -- `log.critical(fmt: string, args: Array): void` – loga uma mensagem crítica _e_ encerra o subgraph. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. A API `log` toma um string de formato e um arranjo de valores de string. Ele então substitui os temporários com os valores de strings do arranjo. 
O primeiro `{}` temporário é substituído pelo primeiro valor no arranjo, o segundo `{}` temporário é substituído pelo segundo valor, e assim por diante. @@ -721,7 +722,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) O único flag atualmente apoiado é o `json`, que deve ser passado ao `ipfs.map`. Com o flag `json`, o arquivo IPFS deve consistir de uma série de valores JSON, com um valor por linha. Chamar `ipfs.map`, irá ler cada linha no arquivo, desserializá-lo em um `JSONValue`, e chamar o callback para cada linha. O callback pode então armazenar dados do `JSONValue` com operações de entidade. As mudanças na entidade só serão armazenadas quando o handler que chamou o `ipfs.map` concluir com sucesso; enquanto isso, elas ficam na memória, e o tamanho do arquivo que o `ipfs.map` pode processar é então limitado. -Em caso de sucesso, o `ipfs.map` retorna `void`. Se qualquer invocação do callback causar um erro, o handler que invocou o `ipfs.map` é abortado, e o subgraph é marcado como falho. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### API de Criptografia @@ -770,44 +771,44 @@ Quando o tipo de um valor é confirmado, ele pode ser convertido num [tipo embut ### Referência de Conversões de Tipos -| Fonte(s) | Destino | Função de conversão | -| -------------------- | -------------------- | ---------------------------- | -| Address | Bytes | nenhum | -| Address | String | s.toHexString() | -| BigDecimal | String | s.toString() | -| BigInt | BigDecimal | s.toBigDecimal() | -| BigInt | String (hexadecimal) | s.toHexString() ou s.toHex() | -| BigInt | String (unicode) | s.toString() | -| BigInt | i32 | s.toI32() | -| Boolean | Boolean | nenhum | -| Bytes (assinado) | BigInt | BigInt.fromSignedBytes(s) | -| Bytes (não assinado) | BigInt | BigInt.fromUnsignedBytes(s) | -| Bytes | String (hexadecimal) | s.toHexString() ou s.toHex() | -| Bytes | String (unicode) | s.toString() | -| Bytes | String (base58) | s.toBase58() | -| Bytes | i32 | s.toI32() | -| Bytes | u32 | s.toU32() | -| Bytes | JSON | json.fromBytes(s) | -| int8 | i32 | nenhum | -| int32 | i32 | nenhum | -| int32 | BigInt | BigInt.fromI32(s) | -| uint24 | i32 | nenhum | -| int64 - int256 | BigInt | nenhum | -| uint32 - uint256 | BigInt | nenhum | -| JSON | boolean | s.toBool() | -| JSON | i64 | s.toI64() | -| JSON | u64 | s.toU64() | -| JSON | f64 | s.toF64() | -| JSON | BigInt | s.toBigInt() | -| JSON | string | s.toString() | -| JSON | Array | s.toArray() | -| JSON | Object | s.toObject() | -| String | Address | Address.fromString(s) | -| Bytes | Address | Address.fromBytes(s) | -| String | BigInt | BigInt.fromString(s) | -| String | BigDecimal | BigDecimal.fromString(s) | -| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | -| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | +| Fonte(s) | Destino | Função de conversão | +| ------------------------ | -------------------- | ------------------------------ | +| Address | Bytes | nenhum | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() ou s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | nenhum | +| Bytes (assinado) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (não assinado) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String 
(hexadecimal) | s.toHexString() ou s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | nenhum | +| int32 | i32 | nenhum | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | nenhum | +| int64 - int256 | BigInt | nenhum | +| uint32 - uint256 | BigInt | nenhum | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | ### Metadados de Fontes de Dados @@ -836,7 +837,7 @@ A classe base `Entity` e a subclasse `DataSourceContext` têm helpers para deter ### DataSourceContext no Manifest -A seção `context` dentro do `dataSources` lhe permite definir pares key-value acessíveis dentro dos seus mapeamentos de subgraph. Os tipos disponíveis são `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, e `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Aqui está um exemplo de YAML que ilustra o uso de vários tipos na seção `context`: @@ -887,4 +888,4 @@ dataSources: - `List`: Especifica uma lista de itens. Cada item deve especificar o seu tipo e dados. - `BigInt`: Especifica um valor integral largo. É necessário citar este devido ao seu grande tamanho. -Este contexto, então, pode ser acessado nos seus arquivos de mapeamento de subgraph, o que resulta em subgraphs mais dinâmicos e configuráveis. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From b8fbff3b6d7f14c0b0b595ed7a8c69e1e894e289 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:48 -0500 Subject: [PATCH 0152/1789] New translations api.mdx (Russian) --- .../developing/creating/graph-ts/api.mdx | 140 +++++++++--------- 1 file changed, 71 insertions(+), 69 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/ru/subgraphs/developing/creating/graph-ts/api.mdx index 88bfcafe7af0..40ba29383852 100644 --- a/website/src/pages/ru/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/ru/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: AssemblyScript API --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Узнайте, какие встроенные API можно использовать при написании мэппингов субграфов. 
По умолчанию доступны два типа API: +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - [Библиотека The Graph TypeScript](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Код, сгенерированный из файлов субграфов с помощью `graph codegen` +- Code generated from Subgraph files by `graph codegen` Вы также можете добавлять другие библиотеки в качестве зависимостей, если они совместимы с [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). @@ -27,18 +27,18 @@ title: AssemblyScript API ### Версии -`apiVersion` в манифесте субграфа указывает версию мэппинга API, которая запускается посредством Graph Node для данного субграфа. +The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| Версия | Примечания к релизу | -| :-: | --- | -| 0.0.9 | Добавлены новые функции хоста [`eth_get_balance`](#balance-of-an-address) и [`hasCode`](#check-if-an-address-a-contract-or-eoa) | -| 0.0.8 | Добавлена проверка наличия полей в схеме при сохранении объекта. | -| 0.0.7 | К типам Ethereum добавлены классы `TransactionReceipt` и `Log`
К объекту Ethereum Event добавлено поле `receipt` |
-| 0.0.6 | В объект Ethereum Transaction добавлено поле `nonce`<br />В объект Ethereum Block добавлено поле `baseFeePerGas` |
-| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
-| 0.0.4 | В объект Ethereum SmartContractCall добавлено поле `functionSignature` |
-| 0.0.3 | Added `from` field to the Ethereum Call object<br />
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | В объект Ethereum Transaction добавлено поле `input` | +| Версия | Примечания к релизу | +| :----: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Добавлены новые функции хоста [`eth_get_balance`](#balance-of-an-address) и [`hasCode`](#check-if-an-address-a-contract-or-eoa) | +| 0.0.8 | Добавлена проверка наличия полей в схеме при сохранении объекта. | +| 0.0.7 | К типам Ethereum добавлены классы `TransactionReceipt` и `Log`
К объекту Ethereum Event добавлено поле `receipt` | +| 0.0.6 | В объект Ethereum Transaction добавлено поле `nonce`
В объект Ethereum Block добавлено поле `baseFeePerGas` | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | В объект Ethereum SmartContractCall добавлено поле `functionSignature` | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | В объект Ethereum Transaction добавлено поле `input` | ### Встроенные типы @@ -223,7 +223,7 @@ import { store } from '@graphprotocol/graph-ts' API `store` позволяет загружать, сохранять и удалять объекты из хранилища the Graph Node и в него. -Объекты, записанные в хранилище карты, сопоставляются один к одному с типами `@entity`, определенными в схеме субграфов GraphQL. Чтобы сделать работу с этими объектами удобной, команда `graph codegen`, предоставляемая [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) генерирует классы объектов, которые являются подклассами встроенного типа `Entity`, с геттерами и сеттерами свойств для полей в схеме, а также методами загрузки и сохранения этих объектов. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### Создание объектов @@ -233,7 +233,7 @@ API `store` позволяет загружать, сохранять и уда // Импорт класса событий Transfer, сгенерированного из ERC20 ABI import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' -// Импорт типа объекта Transfer, сгенерированного из схемы GraphQL +// Импорт типа объекта Transfer, сгенерированного из схемы GraphQL import { Transfer } from '../generated/schema' событие // Обработчик события передачи @@ -269,6 +269,7 @@ if (transfer == null) { transfer = new Transfer(id) } + // Используйте объект Transfer, как и раньше ``` @@ -282,8 +283,8 @@ if (transfer == null) { The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- В случае, если транзакция не существует, субграф должен будет обратиться к базе данных просто для того, чтобы узнать, что объект не существует. Если автор субграфа уже знает, что объект должен быть создан в том же блоке, использование `loadInBlock` позволяет избежать этого обращения к базе данных. -- Для некоторых субграфов эти пропущенные поиски могут существенно увеличить время индексации. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // или некоторым образом создается идентификатор @@ -292,6 +293,7 @@ if (transfer == null) { transfer = new Transfer(id) } + // Используйте объект Transfer, как и раньше ``` @@ -380,11 +382,11 @@ Ethereum API предоставляет доступ к смарт-контра #### Поддержка типов Ethereum -Как и в случае с объектами, `graph codegen` генерирует классы для всех смарт-контрактов и событий, используемых в субграфе. Для этого ABI контракта должны быть частью источника данных в манифесте субграфа. Как правило, файлы ABI хранятся в папке `abis/`. 
+As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -С помощью сгенерированных классов преобразования между типами Ethereum и [встроенными типами] (#built-in-types) происходят за кулисами, так что авторам субграфов не нужно беспокоиться о них. +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. -Следующий пример иллюстрирует это. С учётом схемы субграфа, такой как +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +485,7 @@ class Log { #### Доступ к состоянию смарт-контракта -Код, сгенерированный с помощью `graph codegen`, также включает классы для смарт-контрактов, используемых в субграфе. Они могут быть использованы для доступа к общедоступным переменным состояния и вызова функций контракта в текущем блоке. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. Распространенным шаблоном является доступ к контракту, из которого исходит событие. Это достигается с помощью следующего кода: @@ -506,7 +508,7 @@ export function handleTransfer(event: TransferEvent) { Пока `ERC20Contract` в Ethereum имеет общедоступную функцию только для чтения, называемую `symbol`, ее можно вызвать с помощью `.symbol()`. Для общедоступных переменных состояния автоматически создается метод с таким же именем. -Любой другой контракт, который является частью субграфа, может быть импортирован из сгенерированного кода и привязан к действительному адресу. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### Обработка возвращенных вызовов @@ -582,7 +584,7 @@ let isContract = ethereum.hasCode(eoa).inner // возвращает ложно import { log } from '@graphprotocol/graph-ts' ``` -API `log` позволяет субграфам записывать информацию в стандартный вывод Graph Node, а также в Graph Explorer. Сообщения могут быть зарегистрированы с использованием различных уровней ведения лога. Для составления сообщений лога из аргумента предусмотрен синтаксис строки базового формата. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. API `log` включает в себя следующие функции: @@ -590,7 +592,7 @@ API `log` включает в себя следующие функции: - `log.info (fmt: string, args: Array): void` - регистрирует информационное сообщение. - `log.warning(fmt: string, args: Array): void` - регистрирует предупреждение. - `log.error(fmt: string, args: Array): void` - регистрирует сообщение об ошибке. -- `log.critical(fmt: string, args: Array): void` – регистрирует критическое сообщение и завершает работу субграфа. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. API `log` принимает строку формата и массив строковых значений. Затем он заменяет заполнители строковыми значениями из массива. 
Первый `{}` заполнитель заменяется первым значением в массиве, второй `{}` заполнитель заменяется вторым значением и так далее. @@ -695,8 +697,8 @@ let data = ipfs.cat(path) import { JSONValue, Value } from '@graphprotocol/graph-ts' export function processItem(value: JSONValue, userData: Value): void { - // Смотрите документацию по JsonValue для получения подробной информации о работе - // со значениями JSON +// Смотрите документацию по JsonValue для получения подробной информации о работе +// со значениями JSON let obj = value.toObject() let id = obj.get('id') let title = obj.get('title') @@ -705,7 +707,7 @@ export function processItem(value: JSONValue, userData: Value): void { return } - // Обратные вызовы также могут создавать объекты +// Обратные вызовы также могут создавать объекты let newItem = new Item(id) newItem.title = title.toString() newitem.parent = userData.toString() // Установите для родителя значение "parentId" @@ -721,7 +723,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) Единственным поддерживаемым в настоящее время флагом является `json`, который должен быть передан в `ipfs.map`. С флагом `json` файл IPFS должен состоять из серии значений JSON, по одному значению в строке. Вызов `ipfs.map` прочитает каждую строку в файле, десериализует ее в `JSONValue` и совершит обратный вызов для каждой из них. Затем обратный вызов может использовать операции с объектами для хранения данных из `JSONValue`. Изменения объекта сохраняются только после успешного завершения обработчика, вызвавшего `ipfs.map`; в то же время они хранятся в памяти, и поэтому размер файла, который может обработать `ipfs.map`, ограничен. -При успешном завершении `ipfs.map` возвращает `void`. Если какое-либо совершение обратного вызова приводит к ошибке, обработчик, вызвавший `ipfs.map`, прерывается, а субграф помечается как давший сбой. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. 
### Crypto API @@ -770,44 +772,44 @@ if (value.kind == JSONValueKind.BOOL) { ### Справка по преобразованию типов -| Источник(и) | Место назначения | Функция преобразования | -| -------------------- | -------------------- | ----------------------------- | -| Address | Bytes | отсутствует | -| Address | String | s.toHexString() | -| BigDecimal | String | s.toString() | -| BigInt | BigDecimal | s.toBigDecimal() | -| BigInt | String (hexadecimal) | s.toHexString() или s.toHex() | -| BigInt | String (unicode) | s.toString() | -| BigInt | i32 | s.toI32() | -| Boolean | Boolean | отсутствует | -| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | -| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | -| Bytes | String (hexadecimal) | s.toHexString() или s.toHex() | -| Bytes | String (unicode) | s.toString() | -| Bytes | String (base58) | s.toBase58() | -| Bytes | i32 | s.toI32() | -| Bytes | u32 | s.toU32() | -| Bytes | JSON | json.fromBytes(s) | -| int8 | i32 | отсутствует | -| int32 | i32 | отсутствует | -| int32 | BigInt | BigInt.fromI32(s) | -| uint24 | i32 | отсутствует | -| int64 - int256 | BigInt | отсутствует | -| uint32 - uint256 | BigInt | отсутствует | -| JSON | boolean | s.toBool() | -| JSON | i64 | s.toU64() | -| JSON | u64 | s.toU64() | -| JSON | f64 | s.toF64() | -| JSON | BigInt | s.toBigInt() | -| JSON | string | s.toString() | -| JSON | Array | s.toArray() | -| JSON | Object | s.toObject() | -| String | Address | Address.fromString(s) | -| Bytes | Address | Address.fromBytes(s) | -| String | BigInt | BigInt.fromString(s) | -| String | BigDecimal | BigDecimal.fromString(s) | -| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | -| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | +| Источник(и) | Место назначения | Функция преобразования | +| ---------------------- | ------------------------- | ----------------------------------- | +| Address | Bytes | отсутствует | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() или s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | отсутствует | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() или s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | отсутствует | +| int32 | i32 | отсутствует | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | отсутствует | +| int64 - int256 | BigInt | отсутствует | +| uint32 - uint256 | BigInt | отсутствует | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toU64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | ### Метаданные источника данных @@ -836,7 +838,7 @@ if (value.kind == JSONValueKind.BOOL) { ### DataSourceContext в манифесте -Раздел `context` в `dataSources` позволяет Вам 
определять пары ключ-значение, которые доступны в Ваших мэппингах субграфа. Доступные типы: `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List` и `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Ниже приведен пример YAML, иллюстрирующий использование различных типов в разделе `context`: @@ -887,4 +889,4 @@ dataSources: - `List`: Определяет список элементов. Для каждого элемента необходимо указать его тип и данные. - `BigInt`: Определяет большое целочисленное значение. Необходимо заключить в кавычки из-за большого размера. -Затем этот контекст становится доступным в Ваших мэппинговых файлах субграфов, что позволяет сделать субграфы более динамичными и настраиваемыми. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 2afd120c86a9a46759885b70984fb0885e8002b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:49 -0500 Subject: [PATCH 0153/1789] New translations api.mdx (Swedish) --- .../developing/creating/graph-ts/api.mdx | 198 +++++++++--------- 1 file changed, 104 insertions(+), 94 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/sv/subgraphs/developing/creating/graph-ts/api.mdx index dd9fb343dd68..d001175da07e 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: API för AssemblyScript --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` +- Code generated from Subgraph files by `graph codegen` You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). @@ -27,18 +27,18 @@ The `@graphprotocol/graph-ts` library provides the following APIs: ### Versioner -The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. +The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| Version | Versionsanteckningar | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. 
| -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| Version | Versionsanteckningar | +| :-----: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Inbyggda typer @@ -163,7 +163,7 @@ _Math_ #### TypedMap ```typescript -import { TypedMap } from '@graphprotocol/graph-ts' +import { TypedMap } from "@graphprotocol/graph-ts"; ``` `TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). @@ -179,7 +179,7 @@ The `TypedMap` class has the following API: #### Bytes ```typescript -import { Bytes } from '@graphprotocol/graph-ts' +import { Bytes } from "@graphprotocol/graph-ts"; ``` `Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. @@ -205,7 +205,7 @@ _Operators_ #### Address ```typescript -import { Address } from '@graphprotocol/graph-ts' +import { Address } from "@graphprotocol/graph-ts"; ``` `Address` extends `Bytes` to represent Ethereum `address` values. @@ -218,12 +218,12 @@ It adds the following method on top of the `Bytes` API: ### Store API ```typescript -import { store } from '@graphprotocol/graph-ts' +import { store } from "@graphprotocol/graph-ts"; ``` The `store` API allows to load, save and remove entities from and to the Graph Node store. -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. 
#### Skapa entiteter @@ -231,24 +231,24 @@ Följande är ett vanligt mönster för att skapa entiteter från Ethereum-händ ```typescript // Importera händelseklassen Transfer som genererats från ERC20 ABI -import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' +import { Transfer as TransferEvent } from "../generated/ERC20/ERC20"; // Importera entitetstypen Transfer som genererats från GraphQL-schemat -import { Transfer } from '../generated/schema' +import { Transfer } from "../generated/schema"; // Händelsehanterare för överföring export function handleTransfer(event: TransferEvent): void { // Skapa en Transfer-entitet, med transaktionshash som enhets-ID - let id = event.transaction.hash - let transfer = new Transfer(id) + let id = event.transaction.hash; + let transfer = new Transfer(id); // Ange egenskaper för entiteten med hjälp av händelseparametrarna - transfer.from = event.params.from - transfer.to = event.params.to - transfer.amount = event.params.amount + transfer.from = event.params.from; + transfer.to = event.params.to; + transfer.amount = event.params.amount; // Spara entiteten till lagret - transfer.save() + transfer.save(); } ``` @@ -263,10 +263,10 @@ Each entity must have a unique ID to avoid collisions with other entities. It is Om en entitet redan finns kan den laddas från lagret med följande: ```typescript -let id = event.transaction.hash // eller hur ID konstrueras -let transfer = Transfer.load(id) +let id = event.transaction.hash; // eller hur ID konstrueras +let transfer = Transfer.load(id); if (transfer == null) { - transfer = new Transfer(id) + transfer = new Transfer(id); } // Använd överföringsenheten som tidigare @@ -282,14 +282,14 @@ As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotoco The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript -let id = event.transaction.hash // eller hur ID konstrueras -let transfer = Transfer.loadInBlock(id) +let id = event.transaction.hash; // eller hur ID konstrueras +let transfer = Transfer.loadInBlock(id); if (transfer == null) { - transfer = new Transfer(id) + transfer = new Transfer(id); } // Använd överföringsenheten som tidigare @@ -343,7 +343,7 @@ transfer.amount = ... 
Det är också möjligt att avaktivera egenskaper med en av följande två instruktioner: ```typescript -transfer.from.unset() +transfer.from.unset(); transfer.from = null ``` @@ -353,14 +353,14 @@ Updating array properties is a little more involved, as the getting an array fro ```typescript // Detta kommer inte att fungera -entity.numbers.push(BigInt.fromI32(1)) -entity.save() +entity.numbers.push(BigInt.fromI32(1)); +entity.save(); // Detta kommer att fungera -let numbers = entity.numbers -numbers.push(BigInt.fromI32(1)) -entity.numbers = numbers -entity.save() +let numbers = entity.numbers; +numbers.push(BigInt.fromI32(1)); +entity.numbers = numbers; +entity.save(); ``` #### Ta bort entiteter från lagret @@ -380,11 +380,11 @@ Ethereum API ger tillgång till smarta kontrakt, offentliga tillståndsvariabler #### Stöd för Ethereum-typer -As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. -Följande exempel illustrerar detta. Med en subgraph-schema som +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -398,12 +398,12 @@ type Transfer @entity { and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: ```typescript -let id = event.transaction.hash -let transfer = new Transfer(id) -transfer.from = event.params.from -transfer.to = event.params.to -transfer.amount = event.params.amount -transfer.save() +let id = event.transaction.hash; +let transfer = new Transfer(id); +transfer.from = event.params.from; +transfer.to = event.params.to; +transfer.amount = event.params.amount; +transfer.save(); ``` #### Händelser och Block/Transaktionsdata @@ -483,22 +483,25 @@ class Log { #### Åtkomst till Smart Contract-tillstånd -The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. En vanlig mönster är att komma åt kontraktet från vilket en händelse härstammar. 
Detta uppnås med följande kod: ```typescript // Importera den genererade kontraktsklassen och den genererade klassen för överföringshändelser -import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +import { + ERC20Contract, + Transfer as TransferEvent, +} from "../generated/ERC20Contract/ERC20Contract"; // Importera den genererade entitetsklassen -import { Transfer } from '../generated/schema' +import { Transfer } from "../generated/schema"; export function handleTransfer(event: TransferEvent) { // Bind kontraktet till den adress som skickade händelsen - let contract = ERC20Contract.bind(event.address) + let contract = ERC20Contract.bind(event.address); // Åtkomst till tillståndsvariabler och funktioner genom att anropa dem - let erc20Symbol = contract.symbol() + let erc20Symbol = contract.symbol(); } ``` @@ -506,7 +509,7 @@ export function handleTransfer(event: TransferEvent) { As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. -Andra kontrakt som är en del av subgraphen kan importeras från den genererade koden och bindas till en giltig adress. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### Hantering av återkallade anrop @@ -515,12 +518,12 @@ If the read-only methods of your contract may revert, then you should handle tha - For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: ```typescript -let gravitera = gravitera.bind(event.address) -let callResult = gravitera_gravatarToOwner(gravatar) +let gravitera = gravitera.bind(event.address); +let callResult = gravitera_gravatarToOwner(gravatar); if (callResult.reverted) { - log.info('getGravatar reverted', []) + log.info("getGravatar reverted", []); } else { - let owner = callResult.value + let owner = callResult.value; } ``` @@ -579,10 +582,10 @@ let isContract = ethereum.hasCode(eoa).inner // returns false ### API för loggning ```typescript -import { log } from '@graphprotocol/graph-ts' +import { log } from "@graphprotocol/graph-ts"; ``` -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. The `log` API includes the following functions: @@ -590,12 +593,16 @@ The `log` API includes the following functions: - `log.info(fmt: string, args: Array): void` - logs an informational message. - `log.warning(fmt: string, args: Array): void` - logs a warning. - `log.error(fmt: string, args: Array): void` - logs an error message. -- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. 
The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. ```typescript -log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +log.info("Message to be displayed: {}, {}, {}", [ + value.toString(), + anotherValue.toString(), + "already a string", +]); ``` #### Loggning av ett eller flera värden @@ -618,11 +625,11 @@ export function handleSomeEvent(event: SomeEvent): void { I exemplet nedan loggas endast det första värdet i argument arrayen, trots att arrayen innehåller tre värden. ```typescript -let myArray = ['A', 'B', 'C'] +let myArray = ["A", "B", "C"]; export function handleSomeEvent(event: SomeEvent): void { // Visar : "Mitt värde är: A" (Även om tre värden skickas till `log.info`) - log.info('Mitt värde är: {}', myArray) + log.info("Mitt värde är: {}", myArray); } ``` @@ -631,11 +638,14 @@ export function handleSomeEvent(event: SomeEvent): void { Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. ```typescript -let myArray = ['A', 'B', 'C'] +let myArray = ["A", "B", "C"]; export function handleSomeEvent(event: SomeEvent): void { // Visar: "Mitt första värde är: A, andra värdet är: B, tredje värdet är: C" - log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) + log.info( + "My first value is: {}, second value is: {}, third value is: {}", + myArray + ); } ``` @@ -646,7 +656,7 @@ För att visa ett specifikt värde i arrayen måste det indexeras och tillhandah ```typescript export function handleSomeEvent(event: SomeEvent): void { // Visar : "Mitt tredje värde är C" - log.info('My third value is: {}', [myArray[2]]) + log.info("My third value is: {}", [myArray[2]]); } ``` @@ -655,21 +665,21 @@ export function handleSomeEvent(event: SomeEvent): void { I exemplet nedan loggas blocknummer, blockhash och transaktionshash från en händelse: ```typescript -import { log } from '@graphprotocol/graph-ts' +import { log } from "@graphprotocol/graph-ts"; export function handleSomeEvent(event: SomeEvent): void { - log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + log.debug("Block number: {}, block hash: {}, transaction hash: {}", [ event.block.number.toString(), // "47596000" event.block.hash.toHexString(), // "0x..." event.transaction.hash.toHexString(), // "0x..." - ]) + ]); } ``` ### IPFS API ```typescript -import { ipfs } from '@graphprotocol/graph-ts' +import { ipfs } from "@graphprotocol/graph-ts" ``` Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. 
@@ -678,13 +688,13 @@ För att läsa en fil från IPFS med en given IPFS-hash eller sökväg görs fö ```typescript // Placera detta i en händelsehanterare i mappningen -let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' -let data = ipfs.cat(hash) +let hash = "QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D"; +let data = ipfs.cat(hash); // Sökvägar som `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` // som inkluderar filer i kataloger stöds också -let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' -let data = ipfs.cat(path) +let path = "QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile"; +let data = ipfs.cat(path); ``` **Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. @@ -692,41 +702,41 @@ let data = ipfs.cat(path) It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: ```typescript -import { JSONValue, Value } from '@graphprotocol/graph-ts' +import { JSONValue, Value } from "@graphprotocol/graph-ts"; export function processItem(value: JSONValue, userData: Value): void { // Se JSONValue-dokumentationen för mer information om hur man hanterar // med JSON-värden - let obj = value.toObject() - let id = obj.get('id') - let title = obj.get('title') + let obj = value.toObject(); + let id = obj.get("id"); + let title = obj.get("title"); if (!id || !title) { - return + return; } // Callbacks kan också skapa enheter - let newItem = new Item(id) - newItem.title = title.toString() - newitem.parent = userData.toString() // Ange parent till "parentId" - newitem.save() + let newItem = new Item(id); + newItem.title = title.toString(); + newitem.parent = userData.toString(); // Ange parent till "parentId" + newitem.save(); } // Placera detta i en händelsehanterare i mappningen -ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) +ipfs.map("Qm...", "processItem", Value.fromString("parentId"), ["json"]); // Alternativt kan du använda `ipfs.mapJSON`. -ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) +ipfs.mapJSON("Qm...", "processItem", Value.fromString("parentId")); ``` The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. -On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### Crypto API ```typescript -import { crypto } from '@graphprotocol/graph-ts' +import { crypto } from "@graphprotocol/graph-ts"; ``` The `crypto` API makes a cryptographic functions available for use in mappings. 
Right now, there is only one: @@ -736,7 +746,7 @@ The `crypto` API makes a cryptographic functions available for use in mappings. ### JSON API ```typescript -import { json, JSONValueKind } from '@graphprotocol/graph-ts' +import { json, JSONValueKind } from "@graphprotocol/graph-ts" ``` JSON data can be parsed using the `json` API: @@ -836,7 +846,7 @@ The base `Entity` class and the child `DataSourceContext` class have helpers to ### DataSourceContext in Manifest -The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Here is a YAML example illustrating the usage of various types in the `context` section: @@ -887,4 +897,4 @@ dataSources: - `List`: Specifies a list of items. Each item needs to specify its type and data. - `BigInt`: Specifies a large integer value. Must be quoted due to its large size. -This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 288d2159a6af9abb4dee979a4e5eeba723f51b26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:50 -0500 Subject: [PATCH 0154/1789] New translations api.mdx (Turkish) --- .../developing/creating/graph-ts/api.mdx | 134 +++++++++--------- 1 file changed, 67 insertions(+), 67 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/tr/subgraphs/developing/creating/graph-ts/api.mdx index e90c754f6c34..93dfb2d4060c 100644 --- a/website/src/pages/tr/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/tr/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: AssemblyScript API'si --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Subgraph eşlemeleri yazarken kullanılabilecek yerleşik API'leri öğrenin. Hazır olarak sunulan iki tür API mevcuttur: +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - [Graph TypeScript kütüphanesi](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Subgraph dosyalarından `graph codegen` tarafından üretilen kod +- Code generated from Subgraph files by `graph codegen` [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) ile uyumlu olduğu sürece diğer kütüphaneleri de bağımlılık olarak ekleyebilirsiniz. 
@@ -27,18 +27,18 @@ Dil eşlemeleri AssemblyScript ile yazıldığından, [AssemblyScript wiki'sinde ### Sürümler -Subgraph manifestosundaki `apiVersion`, bir subgraph için Graph Düğümü tarafından çalıştırılan eşleme (mapping) API'sinin sürümünü belirtir. +The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| Sürüm | Sürüm Notları | -| :-: | --- | -| 0.0.9 | Yeni host fonksiyonları ekler: [`eth_get_balance`](#balance-of-an-address) ve [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Bir varlığı kaydederken şemadaki alanların varlığını doğrulama mekanizması ekler. | -| 0.0.7 | Ethereum türlerine `TransactionReceipt` ve `Log` sınıfları eklendi
Ethereum Event nesnesine `receipt` alanı eklendi | -| 0.0.6 | Ethereum Transaction nesnesine `nonce` alanı eklendi
Ethereum Block nesnesine `baseFeePerGas` eklendi | +| Sürüm | Sürüm Notları | +| :---: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Yeni host fonksiyonları ekler: [`eth_get_balance`](#balance-of-an-address) ve [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Bir varlığı kaydederken şemadaki alanların varlığını doğrulama mekanizması ekler. | +| 0.0.7 | Ethereum türlerine `TransactionReceipt` ve `Log` sınıfları eklendi
Ethereum Event nesnesine `receipt` alanı eklendi | +| 0.0.6 | Ethereum Transaction nesnesine `nonce` alanı eklendi
Ethereum Block nesnesine `baseFeePerGas` eklendi | | 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Ethereum SmartContractCall nesnesine `functionSignature` alanı eklendi | -| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Ethereum Transaction nesnesine `input` alanı eklendi | +| 0.0.4 | Ethereum SmartContractCall nesnesine `functionSignature` alanı eklendi | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Ethereum Transaction nesnesine `input` alanı eklendi | ### Dahili Türler @@ -223,7 +223,7 @@ import { store } from '@graphprotocol/graph-ts' `store` API'si, varlıkları Graph Düğümü deposundan yüklemeye, depoya kaydetmeye ve depodan kaldırmaya olanak tanır. -Depoya yazılan varlıklar, subgraph'in GraphQL şemasında tanımlanan `@entity` türleriyle bire bir eşleşir. Bu varlıklarla çalışmayı kolaylaştırmak için [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) tarafından sağlanan `graph codegen` komutu varlık sınıfları oluşturur. Varlık sınıfları, şemadaki alanlar için özellik alıcıları ve ayarlayıcılarının yanı sıra bu varlıkları yüklemek ve kaydetmek için metotlar içeren, yerleşik `Entity` türünün alt sınıflarıdır. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### Unsurların Oluşturulması @@ -241,15 +241,15 @@ export function handleTransfer(event: TransferEvent): void { // İşlem hash'ını olay kimliği olarak kullanarak bir Transfer varlığı oluşturun let id = event.transaction.hash let transfer = new Transfer(id) - + // Olay parametrelerini kullanarak varlığın özelliklerini ayarlayın transfer.from = event.params.from transfer.to = event.params.to transfer.amount = event.params.amount - + // Varlığı depoya kaydedin transfer.save() -} + } ``` Zincir işlenirken bir `Transfer` olayıyla karşılaşıldığında, oluşturulan `Transfer` türü (burada varlık türüyle adlandırma çakışmasını önlemek için `TransferEvent` olarak adlandırılmıştır) kullanılarak `handleTransfer` olay işleyicisine aktarılır. Bu tür, olayın ana işlemi ve parametreleri gibi verilere erişim sağlar. @@ -282,8 +282,8 @@ Varlık henüz depoda mevcut olmayabileceğinden, `load` yöntemi `Transfer | nu The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- Eğer işlem mevcut değilse subgraph sırf varlığın mevcut olmadığını öğrenmek için veritabanına başvurmak zorunda kalacaktır. Ancak, subgraph yazarı varlığın aynı blokta oluşturulmuş olması gerektiğini zaten biliyorsa, `loadInBlock` kullanmak bu veritabanı sorgusunu ortadan kaldırır. -- Bazı subgraph'lerde bu başarısız aramalar endeksleme süresine önemli ölçüde etki edebilir. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. 
```typescript let id = event.transaction.hash // veya ID nasıl oluşturulurmuşsa @@ -380,11 +380,11 @@ Ethereum API'si, akıllı sözleşmelere, genel durum değişkenlerine, sözleş #### Ethereum Türleri İçin Destek -Varlıklarda olduğu gibi `graph codegen`, bir subgraph'te kullanılan tüm akıllı sözleşmeler ve olaylar için sınıflar oluşturur. Bunun için, sözleşme ABI'lerinin subgraph manifestosundaki veri kaynağının bir parçası olması gerekir. ABI dosyaları genelde `abis/` klasöründe saklanır. +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -Oluşturulan sınıflarla sayesinde Ethereum türleri ile [yerleşik türler](#built-in-types) arasındaki dönüşümler arka planda gerçekleşir, böylece subgraph yazarlarının bu dönüşümlerle ilgilenmesine gerek kalmaz. +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. -Aşağıdaki örnek bunu açıklar. Aşağıdaki gibi bir subgraph şeması verildiğinde +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +483,7 @@ class Log { #### Akıllı Sözleşme Durumuna Erişim -`graph codegen` tarafından oluşturulan kod, subgraph'te kullanılan akıllı sözleşmeler için sınıflar da içerir. Bu sınıflar, mevcut blokta sözleşmenin genel durum değişkenlerine erişmek ve sözleşme fonksiyonlarını çağırmak için kullanılabilir. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. Yaygın bir model, bir olayın kaynaklandığı sözleşmeye erişmektir. Bu, aşağıdaki kodla elde edilir: @@ -506,7 +506,7 @@ Burada `Transfer`, varlık türüyle adlandırma çakışmasını önlemek için Ethereum üzerindeki `ERC20Contract` sözleşmesi `symbol` adında herkese açık ve salt okunur bir fonksiyona sahip olduğu sürece, `.symbol()` ile çağrılabilir. Genel durum değişkenleri için otomatik olarak aynı ada sahip bir metot oluşturulur. -Subgraph parçası olan diğer tüm sözleşmelerde oluşturulan koddan içe aktarılabilir ve geçerli bir adrese bağlanabilir. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### Geri Dönen Çağrıları Yönetme @@ -582,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // false döndürür import { log } from '@graphprotocol/graph-ts' ``` -`log` API'si, subgraph'lerin bilgileri Graph Düğümü standart çıktısına ve Graph Gezgini'ne kaydetmesine olanak tanır. Mesajlar farklı günlük seviyeleri kullanılarak kaydedilebilir. Verilen argümanlardan günlük mesajlarını oluşturmak için temel bir biçimlendirme dizesi sentaksı sunulmaktadır. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. `log` API'si aşağıdaki fonksiyonları içerir: @@ -590,7 +590,7 @@ import { log } from '@graphprotocol/graph-ts' - `log.info(fmt: string, args: Array): void` – bir bilgilendirme mesajı kaydeder. - `log.warning(fmt: string, args: Array): void` – bir uyarı mesajı kaydeder. 
- `log.error(fmt: string, args: Array): void` – bir hata mesajı kaydeder. -- `log.critical(fmt: string, args: Array): void` – kritik bir mesaj kaydeder **ve** subgraph'i sonlandırır. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. `log` API'si bir format dizesi ve bir dize değerleri dizisini alır. Daha sonra, dizideki dize değerlerini format dizesindeki yer tutucuların yerine koyar. İlk `{}` yer tutucusu dizideki ilk değerle, ikinci `{}` yer tutucusu ikinci değerle ve bu şekilde devam ederek değiştirilir. @@ -722,7 +722,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) Şu anda desteklenen tek bayrak `ipfs.map`'e iletilmesi gereken `json` bayrağıdır. `json` bayrağı ile IPFS dosyası, her satırda bir JSON değeri olacak şekilde bir dizi JSON değerinden oluşmalıdır. `ipfs.map` çağrısı, dosyadaki her satırı okur, bir `JSONValue` olarak ayrıştırır (deserialize eder) ve her biri için geri çağırma (callback) fonksiyonunu çağırır. Geri çağırma fonksiyonu daha sonra `JSONValue`dan gelen verileri depolamak için varlık operasyonlarını kullanabilir. Varlık değişiklikleri yalnızca `ipfs.map`'i çağıran işleyici başarıyla tamamlandığında depolanır; bu sırada değişiklikler bellekte tutulur ve bu nedenle `ipfs.map`'in işleyebileceği dosya boyutu sınırlıdır. -Başarılı olduğunda `ipfs.map`, `void` döndürür. Geri çağırma fonksiyonunun herhangi bir çağrısı bir hataya neden olursa, `ipfs.map`'i çağıran işleyici durdurulur ve subgraph başarısız olarak işaretlenir. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### Kripto(Crypto) API'si @@ -771,44 +771,44 @@ Bir değerin türü kesin olduğunda, aşağıdaki yöntemlerden biri kullanıla ### Tip Dönüşümleri Referansı -| Kaynak(lar) | Hedef | Dönüşüm fonksiyonu | -| ----------------- | ----------------- | ---------------------------- | -| Address | Bytes | yok | -| Address | String | s.toHexString() | -| BigDecimal | String | s.toString() | -| BigInt | BigDecimal | s.toBigDecimal() | -| BigInt | Dizgi (onaltılık) | s.toHexString() or s.toHex() | -| BigInt | String (unicode) | s.toString() | -| BigInt | i32 | s.toI32() | -| Boolean | Boolean | yok | -| Bytes (işaretli) | BigInt | BigInt.fromSignedBytes(s) | -| Bytes (işaretsiz) | BigInt | BigInt.fromUnsignedBytes(s) | -| Bytes | Dizgi (onaltılık) | s.toHexString() or s.toHex() | -| Bytes | String (unicode) | s.toString() | -| Bytes | String (base58) | s.toBase58() | -| Bytes | i32 | s.toI32() | -| Bytes | u32 | s.toU32() | -| Bytes | JSON | json.fromBytes(s) | -| int8 | i32 | yok | -| int32 | i32 | yok | -| int32 | BigInt | BigInt.fromI32(s) | -| uint24 | i32 | yok | -| int64 - int256 | BigInt | yok | -| uint32 - uint256 | BigInt | yok | -| JSON | boolean | s.toBool() | -| JSON | i64 | s.toI64() | -| JSON | u64 | s.toU64() | -| JSON | f64 | s.toF64() | -| JSON | BigInt | s.toBigInt() | -| JSON | string | s.toString() | -| JSON | Array | s.toArray() | -| JSON | Object | s.toObject() | -| String | Address | Address.fromString(s) | -| Bytes | Address | Address.fromBytes(s) | -| String | BigInt | BigInt.fromString(s) | -| String | BigDecimal | BigDecimal.fromString(s) | -| Dizgi (onaltılık) | Bytes | ByteArray.fromHexString(s) | -| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | +| Kaynak(lar) | Hedef | Dönüşüm fonksiyonu | +| ---------------------- | -------------------- | ---------------------------- | +| Address | 
Bytes | yok | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | Dizgi (onaltılık) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | yok | +| Bytes (işaretli) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (işaretsiz) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | Dizgi (onaltılık) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | yok | +| int32 | i32 | yok | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | yok | +| int64 - int256 | BigInt | yok | +| uint32 - uint256 | BigInt | yok | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| Dizgi (onaltılık) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | ### Veri Kaynağı Meta Verileri @@ -837,7 +837,7 @@ Temel `Entity` sınıfı ve alt sınıf olan `DataSourceContext` sınıfı, alan ### Manifest'teki DataSourceContext -`dataSources` içindeki `context` bölümü, subgraph eşlemeleriniz içinde erişilebilen anahtar-değer çiftlerini tanımlamanıza olanak tanır. Kullanılabilir türler şunlardır: `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List` ve `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. İşte `context` bölümünde çeşitli türlerin kullanımını gösteren bir YAML örneği: @@ -888,4 +888,4 @@ dataSources: - `List`: Elemanlardan oluşan bir liste belirtir. Her elemanın türü ve verisi belirtilmelidir. - `BigInt`: Büyük bir tamsayı değeri belirtir. Büyük boyutu nedeniyle tırnak içinde yazılması gerekir. -Bu bağlama daha sonra subgraph eşleştirme dosyalarınızdan erişilebilir ve böylece daha dinamik ve yapılandırılabilir subgraphlar elde edebilirsiniz. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 2e3bce7e4d1d95c0c251b654135bd75fc56c1d65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:51 -0500 Subject: [PATCH 0155/1789] New translations api.mdx (Ukrainian) --- .../developing/creating/graph-ts/api.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/uk/subgraphs/developing/creating/graph-ts/api.mdx index 35bb04826c98..2e256ae18190 100644 --- a/website/src/pages/uk/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/uk/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: AssemblyScript API --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. 
It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` +- Code generated from Subgraph files by `graph codegen` You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). @@ -27,18 +27,18 @@ The `@graphprotocol/graph-ts` library provides the following APIs: ### Versions -The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. +The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| Version | Release notes | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| Version | Release notes | +| :-----: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Built-in Types @@ -223,7 +223,7 @@ import { store } from '@graphprotocol/graph-ts' The `store` API allows to load, save and remove entities from and to the Graph Node store. -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### Creating entities @@ -282,8 +282,8 @@ As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotoco The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // or however the ID is constructed @@ -380,11 +380,11 @@ The Ethereum API provides access to smart contracts, public state variables, con #### Support for Ethereum Types -As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. 
-The following example illustrates this. Given a subgraph schema like +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +483,7 @@ class Log { #### Access to Smart Contract State -The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. A common pattern is to access the contract from which an event originates. This is achieved with the following code: @@ -506,7 +506,7 @@ export function handleTransfer(event: TransferEvent) { As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. -Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### Handling Reverted Calls @@ -582,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // returns false import { log } from '@graphprotocol/graph-ts' ``` -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. The `log` API includes the following functions: @@ -590,7 +590,7 @@ The `log` API includes the following functions: - `log.info(fmt: string, args: Array): void` - logs an informational message. - `log.warning(fmt: string, args: Array): void` - logs a warning. - `log.error(fmt: string, args: Array): void` - logs an error message. -- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. @@ -721,7 +721,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. -On success, `ipfs.map` returns `void`. 
If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### Crypto API @@ -836,7 +836,7 @@ The base `Entity` class and the child `DataSourceContext` class have helpers to ### DataSourceContext in Manifest -The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Here is a YAML example illustrating the usage of various types in the `context` section: @@ -887,4 +887,4 @@ dataSources: - `List`: Specifies a list of items. Each item needs to specify its type and data. - `BigInt`: Specifies a large integer value. Must be quoted due to its large size. -This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 1ab852039cb9beb6bd0272c11d71d2e196b2c942 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:52 -0500 Subject: [PATCH 0156/1789] New translations api.mdx (Chinese Simplified) --- .../developing/creating/graph-ts/api.mdx | 62 +++++++++---------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/zh/subgraphs/developing/creating/graph-ts/api.mdx index 2a35d4ba56d4..218c0002abd2 100644 --- a/website/src/pages/zh/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/zh/subgraphs/developing/creating/graph-ts/api.mdx @@ -1,13 +1,13 @@ --- -title: AssemblyScript API +title: 汇编脚本API --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` +- Code generated from Subgraph files by `graph codegen` You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). 
@@ -27,18 +27,18 @@ Since language mappings are written in AssemblyScript, it is useful to review th ### 版本 -子图清单中的 `apiVersion` 指定了由 Graph Node 运行的特定子图的映射 API 版本。 +The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| 版本 | Release 说明 | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | 添加了 `TransactionReceipt` 和 `Log` 类到以太坊类型。
已将 `receipt` 字段添加到Ethereum Event对象。 | -| 0.0.6 | 向Ethereum Transaction对象添加了 nonce 字段 向 Etherum Block对象添加
baseFeePerGas字段 | +| 版本 | Release 说明 | +| :---: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | 添加了 `TransactionReceipt` 和 `Log` 类到以太坊类型。
已将 `receipt` 字段添加到Ethereum Event对象。 | +| 0.0.6 | 向Ethereum Transaction对象添加了 nonce 字段 向 Etherum Block对象添加
baseFeePerGas字段 | | 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | 已向 Ethereum SmartContractCall对象添加了 `functionSignature` 字段。 | -| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | 已向Ethereum Transaction对象添加了 `input` 字段。 | +| 0.0.4 | 已向 Ethereum SmartContractCall对象添加了 `functionSignature` 字段。 | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | 已向Ethereum Transaction对象添加了 `input` 字段。 | ### 内置类型 @@ -223,7 +223,7 @@ TypedMap 类具有以下 API: `store` API 允许从和到 Graph Node 存储加载、保存和删除实体。 -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### 创建实体 @@ -282,8 +282,8 @@ As the entity may not exist in the store yet, the `load` method returns a value The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // or however the ID is constructed @@ -380,11 +380,11 @@ store.remove('Transfer', id) #### 对以太坊类型的支持 -与实体一样,`graph codegen` 为子图中使用的所有智能合约和事件生成类。为此,合约 ABI 需要作为子图清单中数据源的一部分。通常,ABI 文件存储在一个名为 `abis/` 的文件夹中。 +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -通过生成的类,以太坊类型和内置类型之间的转换在幕后进行,这样子图作者就不必担心它们。 +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. -以下示例说明了这一点。 给定一个子图模式,如 +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -482,7 +482,7 @@ class Log { #### 访问智能合约状态 -由 `graph codegen` 生成的代码还包括用于子图中使用的智能合约的类。这些类可以用于访问智能合约在当前区块上的公共状态变量和调用函数。 +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. 
一种常见的模式是访问事件起源的合约。 这是通过以下代码实现的: @@ -505,7 +505,7 @@ export function handleTransfer(event: TransferEvent) { 只要以太坊上的 `ERC20Contract` 有一个名为 `symbol` 的公共只读函数,就可以使用 `.symbol()` 来调用它。对于公共状态变量,将自动生成一个同名的方法。 -作为子图一部分的任何其他合约都可以从生成的代码中导入,并且可以绑定到一个有效地址。 +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### 处理重复调用 @@ -581,7 +581,7 @@ let isContract = ethereum.hasCode(eoa).inner // returns false 从 '@graphprotocol/graph-ts'导入 { log } ``` -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. `log` API 包括以下函数: @@ -589,7 +589,7 @@ The `log` API allows subgraphs to log information to the Graph Node standard out - `log.info(fmt: string, args: Array): void` - 记录信息消息。 - `log.warning(fmt: string, args: Array): void` - 记录警告消息。 - `log.error(fmt: string, args: Array): void` - 记录错误消息。 -- `log.critical(fmt: string, args: Array): void` – 记录关键消息并终止子图。 +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. `log` API 接受一个格式字符串和一个字符串值数组。然后,它用数组中的字符串值替换占位符。第一个 `{}` 占位符会被数组中的第一个值替换,第二个 `{}` 占位符会被第二个值替换,依此类推。 @@ -720,9 +720,9 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) 当前仅支持的标志是 `json`,必须传递给 `ipfs.map`。使用 `json` 标志时,IPFS 文件必须由一系列 JSON 值组成,每行一个值。`ipfs.map` 的调用将读取文件中的每一行,将其反序列化为 `JSONValue` 并为每个值调用回调函数。然后,回调函数可以使用实体操作将数据存储到 `JSONValue` 中。仅当调用 `ipfs.map` 的处理程序成功完成时,才会存储实体更改;在此期间,它们将保留在内存中,因此 `ipfs.map` 可以处理的文件大小受到限制。 -成功时,`ipfs.map` 返回 `void`。如果回调的任何调用导致错误,则调用 `ipfs.map` 的处理程序将被中止,并且子图被标记为失败。 +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. -### Crypto API +### 加密API ```typescript 从'@graphprotocol/graph-ts'导入{ crypto } @@ -769,19 +769,19 @@ if (value.kind == JSONValueKind.BOOL) { ### 类型转换参考 -| 源类型 | 目标类型 | 转换函数 | +| 源类型 | 目标类型 | 转换函数 | | -------------------- | -------------------- | ---------------------------- | | Address | Bytes | none | | Address | String | s.toHexString() | | BigDecimal | String | s.toString() | | BigInt | BigDecimal | s.toBigDecimal() | -| BigInt | String (hexadecimal) | s.toHexString() 或 s.toHex() | +| BigInt | String (hexadecimal) | s.toHexString() 或 s.toHex() | | BigInt | String (unicode) | s.toString() | | BigInt | i32 | s.toI32() | | Boolean | Boolean | none | | Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | | Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | -| Bytes | String (hexadecimal) | s.toHexString() 或 s.toHex() | +| Bytes | String (hexadecimal) | s.toHexString() 或 s.toHex() | | Bytes | String (unicode) | s.toString() | | Bytes | String (base58) | s.toBase58() | | Bytes | i32 | s.toI32() | @@ -835,7 +835,7 @@ if (value.kind == JSONValueKind.BOOL) { ### 清单文件中的 DataSourceContext -`dataSources` 内的 `context` 部分允许您定义在子图映射中可访问的键值对。可用的类型包括 `Bool`、`String`、`Int`、`Int8`、`BigDecimal`、`Bytes`、`List` 和 `BigInt`。 +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. 
The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. 以下是一个 YAML 示例,说明了在 context 部分使用各种类型的情况: @@ -886,4 +886,4 @@ dataSources: - `List`:指定一个项目列表。每个项目都需要指定其类型和数据。 - `BigInt`:指定一个大整数值。由于其较大,必须用引号括起来。 -该上下文可以在您的子图映射文件中访问,从而实现更加动态和可配置的子图。 +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 60cf162374d2ff62bda58a97403830249144e811 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:54 -0500 Subject: [PATCH 0157/1789] New translations api.mdx (Urdu (Pakistan)) --- .../developing/creating/graph-ts/api.mdx | 52 +++++++++---------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/ur/subgraphs/developing/creating/graph-ts/api.mdx index e8e3e92cc489..638b47728dee 100644 --- a/website/src/pages/ur/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/ur/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: اسمبلی اسکرپٹ API --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` +- Code generated from Subgraph files by `graph codegen` You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). @@ -27,18 +27,18 @@ The `@graphprotocol/graph-ts` library provides the following APIs: ### ورژنز -The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. +The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| ورزن | جاری کردہ نوٹس | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| ورزن | جاری کردہ نوٹس | +| :---: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | | 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### بلٹ ان اقسام @@ -223,7 +223,7 @@ import { store } from '@graphprotocol/graph-ts' The `store` API allows to load, save and remove entities from and to the Graph Node store. -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### Creating entities @@ -282,8 +282,8 @@ As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotoco The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // or however the ID is constructed @@ -380,11 +380,11 @@ store.remove('Transfer', id) #### ایتھیریم کی اقسام کے لیے سپورٹ -As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. 
-مندرجہ ذیل مثال اس کی وضاحت کرتی ہے۔ جیسا کہ سب گراف اسکیما دیا گیا +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +483,7 @@ class Log { #### سمارٹ کنٹریکٹ اسٹیٹ تک رسائی -The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. ایک عام نمونہ اس کنٹریکٹ تک رسائی حاصل کرنا ہے جہاں سے کوئی واقعہ شروع ہوتا ہے۔ یہ مندرجہ ذیل کوڈ کے ساتھ حاصل کیا جاتا ہے: @@ -506,7 +506,7 @@ export function handleTransfer(event: TransferEvent) { As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. -کوئی بھی دوسرا کنٹریکٹ جو سب گراف کا حصہ ہے، تیار کردہ کوڈ سے درآمد کیا جا سکتا ہے اور اسے ایک درست ایڈریس کا پابند کیا جا سکتا ہے. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### واپس آنے والی کالوں کو ہینڈل کرنا @@ -582,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // returns false import { log } from '@graphprotocol/graph-ts' ``` -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. The `log` API includes the following functions: @@ -590,7 +590,7 @@ The `log` API includes the following functions: - `log.info(fmt: string, args: Array): void` - logs an informational message. - `log.warning(fmt: string, args: Array): void` - logs a warning. - `log.error(fmt: string, args: Array): void` - logs an error message. -- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. @@ -721,7 +721,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. -On success, `ipfs.map` returns `void`. 
If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### کرپٹو API @@ -836,7 +836,7 @@ The base `Entity` class and the child `DataSourceContext` class have helpers to ### DataSourceContext in Manifest -The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Here is a YAML example illustrating the usage of various types in the `context` section: @@ -887,4 +887,4 @@ dataSources: - `List`: Specifies a list of items. Each item needs to specify its type and data. - `BigInt`: Specifies a large integer value. Must be quoted due to its large size. -This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 7e2cee5322f69fd6c4be94b5f5da2b425afce725 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:55 -0500 Subject: [PATCH 0158/1789] New translations api.mdx (Vietnamese) --- .../developing/creating/graph-ts/api.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/vi/subgraphs/developing/creating/graph-ts/api.mdx index 7fea4f954429..ae9afa6b45bc 100644 --- a/website/src/pages/vi/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/vi/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: AssemblyScript API --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` +- Code generated from Subgraph files by `graph codegen` You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). @@ -27,18 +27,18 @@ The `@graphprotocol/graph-ts` library provides the following APIs: ### Các phiên bản -The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. 
+The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| Phiên bản | Ghi chú phát hành | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| Phiên bản | Ghi chú phát hành | +| :-------: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Các loại cài sẵn @@ -223,7 +223,7 @@ import { store } from '@graphprotocol/graph-ts' The `store` API allows to load, save and remove entities from and to the Graph Node store. -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### Tạo các thực thể @@ -282,8 +282,8 @@ As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotoco The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // or however the ID is constructed @@ -380,11 +380,11 @@ The Ethereum API provides access to smart contracts, public state variables, con #### Hỗ trợ các loại Ethereum -As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. 
-The following example illustrates this. Given a subgraph schema like +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +483,7 @@ class Log { #### Quyền truy cập vào Trạng thái Hợp đồng Thông minh -The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. A common pattern is to access the contract from which an event originates. This is achieved with the following code: @@ -506,7 +506,7 @@ export function handleTransfer(event: TransferEvent) { As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. -Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### Xử lý các lệnh gọi được hoàn nguyên @@ -582,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // returns false import { log } from '@graphprotocol/graph-ts' ``` -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. The `log` API includes the following functions: @@ -590,7 +590,7 @@ The `log` API includes the following functions: - `log.info(fmt: string, args: Array): void` - logs an informational message. - `log.warning(fmt: string, args: Array): void` - logs a warning. - `log.error(fmt: string, args: Array): void` - logs an error message. -- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. @@ -721,7 +721,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. 
-On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### Crypto API @@ -836,7 +836,7 @@ The base `Entity` class and the child `DataSourceContext` class have helpers to ### DataSourceContext in Manifest -The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Here is a YAML example illustrating the usage of various types in the `context` section: @@ -887,4 +887,4 @@ dataSources: - `List`: Specifies a list of items. Each item needs to specify its type and data. - `BigInt`: Specifies a large integer value. Must be quoted due to its large size. -This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 713338885b3e29f407c639ed3463b25ce3948cd9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:56 -0500 Subject: [PATCH 0159/1789] New translations api.mdx (Marathi) --- .../developing/creating/graph-ts/api.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/mr/subgraphs/developing/creating/graph-ts/api.mdx index a807b884e30c..c4c2e4f17471 100644 --- a/website/src/pages/mr/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/mr/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: असेंबलीस्क्रिप्ट API --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: - The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` +- Code generated from Subgraph files by `graph codegen` You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). 
@@ -27,18 +27,18 @@ The `@graphprotocol/graph-ts` library provides the following APIs: ### आवृत्त्या -The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. +The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| आवृत्ती | रिलीझ नोट्स | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| आवृत्ती | रिलीझ नोट्स | +| :-----: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### अंगभूत प्रकार @@ -223,7 +223,7 @@ It adds the following method on top of the `Bytes` API: The `store` API allows to load, save and remove entities from and to the Graph Node store. -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### अंदाज निर्मिती करणे @@ -282,8 +282,8 @@ As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotoco The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // or however the ID is constructed @@ -380,11 +380,11 @@ The Ethereum API provides access to smart contracts, public state variables, con #### इथरियम प्रकारांसाठी समर्थन -As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. 
-पुढील उदाहरण हे स्पष्ट करते. सारखी सबग्राफ स्कीमा दिली +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +483,7 @@ class Log { #### स्मार्ट कॉन्ट्रॅक्ट स्टेटमध्ये प्रवेश -The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. कॉन्ट्रॅक्टमध्ये प्रवेश करणे हा एक सामान्य पॅटर्न आहे ज्यातून इव्हेंटची उत्पत्ती होते. हे खालील कोडसह साध्य केले आहे: @@ -506,7 +506,7 @@ export function handleTransfer(event: TransferEvent) { As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. -सबग्राफचा भाग असलेला इतर कोणताही करार व्युत्पन्न केलेल्या कोडमधून आयात केला जाऊ शकतो आणि वैध पत्त्यावर बांधला जाऊ शकतो. +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### रिव्हर्ट केलेले कॉल हाताळणे @@ -582,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // returns false '@graphprotocol/graph-ts' वरून { log } आयात करा ``` -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. The `log` API includes the following functions: @@ -590,7 +590,7 @@ The `log` API includes the following functions: - `log.info(fmt: string, args: Array): void` - logs an informational message. - `log.warning(fmt: string, args: Array): void` - logs a warning. - `log.error(fmt: string, args: Array): void` - logs an error message. -- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. @@ -721,7 +721,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. -On success, `ipfs.map` returns `void`. 
If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### क्रिप्टो API @@ -836,7 +836,7 @@ The base `Entity` class and the child `DataSourceContext` class have helpers to ### DataSourceContext in Manifest -The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Here is a YAML example illustrating the usage of various types in the `context` section: @@ -887,4 +887,4 @@ dataSources: - `List`: Specifies a list of items. Each item needs to specify its type and data. - `BigInt`: Specifies a large integer value. Must be quoted due to its large size. -This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 92c825727ed7519971f9bdb875c9489bd2a0836f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:57 -0500 Subject: [PATCH 0160/1789] New translations api.mdx (Hindi) --- .../developing/creating/graph-ts/api.mdx | 60 +++++++++---------- 1 file changed, 28 insertions(+), 32 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/hi/subgraphs/developing/creating/graph-ts/api.mdx index e967ffa1b80b..da5244b3e395 100644 --- a/website/src/pages/hi/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/hi/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,12 +2,12 @@ title: AssemblyScript API --- -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). -यह पृष्ठ दस्तावेज करता है कि Subgraph मैपिंग लिखते समय किन अंतर्निहित एपीआई का उपयोग किया जा सकता है। बॉक्स से बाहर दो प्रकार के एपीआई उपलब्ध हैं: +Learn what built-in APIs can be used when writing Subgraph mappings. 
There are two kinds of APIs available out of the box: - The Graph TypeScript लाइब्रेरी (https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (graph-ts) -- `graph codegen` द्वारा subgraph files से उत्पन्न code +- Code generated from Subgraph files by `graph codegen` आप अन्य पुस्तकालयों को भी निर्भरताओं के रूप में जोड़ सकते हैं, बशर्ते कि वे [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) के साथ संगत हों। @@ -27,18 +27,18 @@ The `@graphprotocol/graph-ts` library provides the following APIs: ### Versions -The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. +The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. -| Version | Release notes | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| Version | Release notes | +| :-----: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Built-in Types @@ -223,7 +223,7 @@ import { store } from '@graphprotocol/graph-ts' The `store` API allows to load, save and remove entities from and to the Graph Node store. -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. #### संस्थाओं का निर्माण @@ -282,8 +282,8 @@ As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotoco The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. -- यदि लेन-देन मौजूद नहीं है, तो subgraph को केवल यह पता लगाने के लिए डेटाबेस में जाना होगा कि Entity मौजूद नहीं है। यदि subgraph लेखक पहले से जानता है कि Entity उसी ब्लॉक में बनाई जानी चाहिए थी, तो `loadInBlock` का उपयोग इस डेटाबेस राउंडट्रिप से बचाता है। -- कुछ subgraphs के लिए, ये छूटे हुए लुकअप्स indexing समय में महत्वपूर्ण योगदान दे सकते हैं। +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript let id = event.transaction.hash // or however the ID is constructed @@ -380,11 +380,11 @@ store.remove('Transfer', id) #### एथेरियम प्रकार के लिए समर्थन -As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. -With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. -The following example illustrates this. 
Given a subgraph schema like +The following example illustrates this. Given a Subgraph schema like ```graphql type Transfer @entity { @@ -483,7 +483,7 @@ class Log { #### Access to Smart Contract State -The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. एक सामान्य पैटर्न उस अनुबंध का उपयोग करना है जिससे कोई घटना उत्पन्न होती है। यह निम्नलिखित कोड के साथ हासिल किया गया है: @@ -506,7 +506,7 @@ export function handleTransfer(event: TransferEvent) { As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. -कोई अन्य अनुबंध जो सबग्राफ का हिस्सा है, उत्पन्न कोड से आयात किया जा सकता है और एक वैध पते के लिए बाध्य किया जा सकता है। +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. #### रिवर्टेड कॉल्स को हैंडल करना @@ -582,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // returns false import { log } from '@graphprotocol/graph-ts' ``` -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. The `log` API includes the following functions: @@ -590,16 +590,12 @@ The `log` API includes the following functions: - `log.info(fmt: string, args: Array): void` - logs an informational message. - `log.warning(fmt: string, args: Array): void` - logs a warning. - `log.error(fmt: string, args: Array): void` - logs an error message. -- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. ```typescript -log.info('संदेश प्रदर्शित किया जाना है: {}, {}, {}', [ - value.toString(), - OtherValue.toString(), - 'पहले से ही एक स्ट्रिंग', -]) +log.info ('संदेश प्रदर्शित किया जाना है: {}, {}, {}', [value.toString (), OtherValue.toString (), 'पहले से ही एक स्ट्रिंग']) ``` #### एक या अधिक मान लॉग करना @@ -725,7 +721,7 @@ ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. 
Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. -On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. ### क्रिप्टो एपीआई @@ -840,7 +836,7 @@ The base `Entity` class and the child `DataSourceContext` class have helpers to ### DataSourceContext in Manifest -The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Here is a YAML example illustrating the usage of various types in the `context` section: @@ -891,4 +887,4 @@ dataSources: - `List`: Specifies a list of items. Each item needs to specify its type and data. - `BigInt`: Specifies a large integer value. Must be quoted due to its large size. -This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From cada597b90931343cae4f9df7aab315e9b43a792 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:10:59 -0500 Subject: [PATCH 0161/1789] New translations api.mdx (Swahili) --- .../developing/creating/graph-ts/api.mdx | 890 ++++++++++++++++++ 1 file changed, 890 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/developing/creating/graph-ts/api.mdx diff --git a/website/src/pages/sw/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/sw/subgraphs/developing/creating/graph-ts/api.mdx new file mode 100644 index 000000000000..2e256ae18190 --- /dev/null +++ b/website/src/pages/sw/subgraphs/developing/creating/graph-ts/api.mdx @@ -0,0 +1,890 @@ +--- +title: AssemblyScript API +--- + +> Note: If you created a Subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/). + +Learn what built-in APIs can be used when writing Subgraph mappings. There are two kinds of APIs available out of the box: + +- The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) +- Code generated from Subgraph files by `graph codegen` + +You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). + +Since language mappings are written in AssemblyScript, it is useful to review the language and standard library features from the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki). 
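+
+As a quick orientation before the reference below, this minimal sketch shows how both kinds of APIs are typically imported in a mapping file; the `Transfer` names and the `generated/` paths are hypothetical and depend on your own schema and ABIs.
+
+```typescript
+// Helpers and types shipped with the Graph TypeScript library
+import { BigInt, log } from '@graphprotocol/graph-ts'
+
+// Entity classes and contract bindings emitted by `graph codegen`
+// (the paths and names below are hypothetical)
+import { Transfer } from '../generated/schema'
+import { Transfer as TransferEvent } from '../generated/ERC20/ERC20'
+```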
+ +## API Reference + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- Low-level primitives to translate between different type systems such as Ethereum, JSON, GraphQL and AssemblyScript. + +### Versions + +The `apiVersion` in the Subgraph manifest specifies the mapping API version which is run by Graph Node for a given Subgraph. + +| Version | Release notes | +| :-----: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/migration-guides/assemblyscript-migration-guide/))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### Built-in Types + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://www.assemblyscript.org/types.html). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar/bigdecimal.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### Address + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### Store API + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the Subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### Creating entities + +The following is a common pattern for creating entities from Ethereum events. + +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Each entity must have a unique ID to avoid collisions with other entities. 
It is fairly common for event parameters to include a unique identifier that can be used. + +> Note: Using the transaction hash as the ID assumes that no other events in the same transaction create entities with this hash as the ID. + +#### Loading entities from the store + +If an entity already exists, it can be loaded from the store with the following: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may be necessary to check for the `null` case before using the value. + +> Note: Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Looking up entities created withing a block + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. + +- In the case where the transaction does not exist, the Subgraph will have to go to the database simply to find out that the entity does not exist. If the Subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. +- For some Subgraphs, these missed lookups can contribute significantly to the indexing time. + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Looking up derived entities + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### Updating existing entities + +There are two ways to update an existing entity: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +Changing properties is straight forward in most cases, thanks to the generated property setters: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... 
+``` + +It is also possible to unset properties with one of the following two instructions: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### Removing entities from the store + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### Ethereum API + +The Ethereum API provides access to smart contracts, public state variables, contract functions, events, transactions, blocks and the encoding/decoding Ethereum data. + +#### Support for Ethereum Types + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a Subgraph. For this, the contract ABIs need to be part of the data source in the Subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that Subgraph authors do not have to worry about them. + +The following example illustrates this. Given a Subgraph schema like + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### Events and Block/Transaction Data + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### Access to Smart Contract State + +The code generated by `graph codegen` also includes classes for the smart contracts used in the Subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +A common pattern is to access the contract from which an event originates. This is achieved with the following code: + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +Any other contract that is part of the Subgraph can be imported from the generated code and can be bound to a valid address. + +#### Handling Reverted Calls + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. + +- For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +> Note: A Graph node connected to a Geth or Infura client may not detect all reverts. If you rely on this, we recommend using a Graph Node connected to a Parity client. 
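+
+Building on the example above, a handler will often fall back to a default value instead of aborting when a call reverts. The sketch below is one way to do that; it assumes `Address` is imported from `@graphprotocol/graph-ts` and that the `graph-ts` version in use provides `Address.zero()`.
+
+```typescript
+let gravity = Gravity.bind(event.address)
+// `gravatar` is the same ID used in the example above
+let callResult = gravity.try_gravatarToOwner(gravatar)
+
+// Use the zero address as a fallback rather than failing the handler
+let owner = Address.zero()
+if (!callResult.reverted) {
+  owner = callResult.value
+}
+```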
+ +#### Encoding/Decoding ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +For more information: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/08da7cb46ddc8c09f448c5ea4b210c9021ea05ad/tests/integration-tests/host-exports/src/mapping.ts#L86). + +#### Balance of an Address + +The native token balance of an address can be retrieved using the `ethereum` module. This feature is available from `apiVersion: 0.0.9` which is defined `subgraph.yaml`. The `getBalance()` retrieves the balance of the specified address as of the end of the block in which the event is triggered. + +```typescript +import { ethereum } from '@graphprotocol/graph-ts' + +let address = Address.fromString('0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045') +let balance = ethereum.getBalance(address) // returns balance in BigInt +``` + +#### Check if an Address is a Contract or EOA + +To check whether an address is a smart contract address or an externally owned address (EOA), use the `hasCode()` function from the `ethereum` module which will return `boolean`. This feature is available from `apiVersion: 0.0.9` which is defined `subgraph.yaml`. + +```typescript +import { ethereum } from '@graphprotocol/graph-ts' + +let contractAddr = Address.fromString('0x2E645469f354BB4F5c8a05B3b30A929361cf77eC') +let isContract = ethereum.hasCode(contractAddr).inner // returns true + +let eoa = Address.fromString('0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045') +let isContract = ethereum.hasCode(eoa).inner // returns false +``` + +### Logging API + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows Subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the Subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. 
+ +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### Logging one or more values + +##### Logging a single value + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### Logging a single entry from an existing array + +In the example below, only the first value of the argument array is logged, despite the array containing three values. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Even though three values are passed to `log.info`) + log.info('My value is: {}', myArray) +} +``` + +#### Logging multiple entries from an existing array + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My first value is: A, second value is: B, third value is: C" + log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) +} +``` + +##### Logging a specific entry from an existing array + +To display a specific value in the array, the indexed value must be provided. + +```typescript +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My third value is C" + log.info('My third value is: {}', [myArray[2]]) +} +``` + +##### Logging event information + +The example below logs the block number, block hash and transaction hash from an event: + +```typescript +import { log } from '@graphprotocol/graph-ts' + +export function handleSomeEvent(event: SomeEvent): void { + log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + event.block.number.toString(), // "47596000" + event.block.hash.toHexString(), // "0x..." + event.transaction.hash.toHexString(), // "0x..." + ]) +} +``` + +### IPFS API + +```typescript +import { ipfs } from '@graphprotocol/graph-ts' +``` + +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. + +Given an IPFS hash or path, reading a file from IPFS is done as follows: + +```typescript +// Put this inside an event handler in the mapping +let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' +let data = ipfs.cat(hash) + +// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` +// that include files in directories are also supported +let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' +let data = ipfs.cat(path) +``` + +**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. + +It is also possible to process larger files in a streaming fashion with `ipfs.map`. 
The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: + +```typescript +import { JSONValue, Value } from '@graphprotocol/graph-ts' + +export function processItem(value: JSONValue, userData: Value): void { + // See the JSONValue documentation for details on dealing + // with JSON values + let obj = value.toObject() + let id = obj.get('id') + let title = obj.get('title') + + if (!id || !title) { + return + } + + // Callbacks can also created entities + let newItem = new Item(id) + newItem.title = title.toString() + newitem.parent = userData.toString() // Set parent to "parentId" + newitem.save() +} + +// Put this inside an event handler in the mapping +ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) + +// Alternatively, use `ipfs.mapJSON` +ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) +``` + +The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the Subgraph is marked as failed. + +### Crypto API + +```typescript +import { crypto } from '@graphprotocol/graph-ts' +``` + +The `crypto` API makes a cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### JSON API + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... 
+} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) + +### Type Conversions Reference + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### Data Source Metadata + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### Entity and DataSourceContext + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your Subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. 
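+
+Inside a mapping handler, these values can then be read back through `dataSource.context()` together with the getters listed above. A minimal sketch, assuming a hypothetical `handleSomeEvent` handler and using the keys from the manifest example below:
+
+```typescript
+import { dataSource } from '@graphprotocol/graph-ts'
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Read values defined in the manifest `context` section
+  let context = dataSource.context()
+  let isEnabled = context.getBoolean('bool_example')
+  let label = context.getString('string_example')
+  // ...use `isEnabled` and `label` to drive the mapping logic
+}
+```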
+ +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +This context is then accessible in your Subgraph mapping files, enabling more dynamic and configurable Subgraphs. From 77a125dd721486b4b370b33463c3126d7479457f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:00 -0500 Subject: [PATCH 0162/1789] New translations common-issues.mdx (Romanian) --- .../ro/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ro/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/ro/subgraphs/developing/creating/graph-ts/common-issues.mdx index f8d0c9c004c2..65e8e3d4a8a3 100644 --- a/website/src/pages/ro/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/ro/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: Common AssemblyScript Issues --- -There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). 
From 47766c93a8817af07606439b994d04d86e0d29d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:01 -0500 Subject: [PATCH 0163/1789] New translations common-issues.mdx (French) --- .../fr/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/fr/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/fr/subgraphs/developing/creating/graph-ts/common-issues.mdx index a946b30a71b1..8db1df626d20 100644 --- a/website/src/pages/fr/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/fr/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: Common AssemblyScript Issues --- -Il existe certains problèmes courants avec [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) lors du développement de subgraph. Ces problèmes varient en termes de difficulté de débogage, mais les connaître peut être utile. Voici une liste non exhaustive de ces problèmes : +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: - Les variables de classe `Private` ne sont pas appliquées dans [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). Il n'y a aucun moyen de protéger les variables de classe d'une modification directe à partir de l'objet de la classe. - La portée n'est pas héritée dans les [fonctions de fermeture] (https://www.assemblyscript.org/status.html#on-closures), c'est-à-dire que les variables déclarées en dehors des fonctions de fermeture ne peuvent pas être utilisées. Explication dans les [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From ea76e703b61eeff97ae632dc80754235d479ac36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:02 -0500 Subject: [PATCH 0164/1789] New translations common-issues.mdx (Spanish) --- .../es/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/es/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/es/subgraphs/developing/creating/graph-ts/common-issues.mdx index 9b540b6d07d4..6d2a39b9e67b 100644 --- a/website/src/pages/es/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/es/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: Problemas comunes de AssemblyScript --- -There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. 
- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 34d8e38fdc83f874b1c8823aa33684e3a191f67c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:03 -0500 Subject: [PATCH 0165/1789] New translations common-issues.mdx (Arabic) --- .../ar/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ar/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/ar/subgraphs/developing/creating/graph-ts/common-issues.mdx index 6c50af984ad0..b0ce00e687e3 100644 --- a/website/src/pages/ar/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/ar/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: مشاكل شائعة في أسمبلي سكريبت (AssemblyScript) --- -هناك بعض مشاكل [أسمبلي سكريبت](https://github.com/AssemblyScript/assemblyscript) المحددة، التي من الشائع الوقوع فيها أثتاء تطوير غرافٍ فرعي. وهي تتراوح في صعوبة تصحيح الأخطاء، ومع ذلك، فإنّ إدراكها قد يساعد. وفيما يلي قائمة غير شاملة لهذه المشاكل: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - لا يتم توريث النطاق في [دوال الإغلاق](https://www.assemblyscript.org/status.html#on-closures)، أي لا يمكن استخدام المتغيرات المعلنة خارج دوال الإغلاق. الشرح في [ النقاط الهامة للمطورين #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 5b88a3622ee65e7ceb99b9e22b0cb76599710826 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:04 -0500 Subject: [PATCH 0166/1789] New translations common-issues.mdx (Czech) --- .../cs/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/cs/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/cs/subgraphs/developing/creating/graph-ts/common-issues.mdx index 79ec3df1a827..419f698e68e4 100644 --- a/website/src/pages/cs/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/cs/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: Běžné problémy se AssemblyScript --- -Při vývoji podgrafů se často vyskytují určité problémy [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Jejich obtížnost při ladění je různá, nicméně jejich znalost může pomoci. Následuje neúplný seznam těchto problémů: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). 
There is no way to protect class variables from being directly changed from the class object. - Rozsah se nedědí do [uzavíracích funkcí](https://www.assemblyscript.org/status.html#on-closures), tj. proměnné deklarované mimo uzavírací funkce nelze použít. Vysvětlení v [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 9729bf57256ab3020703f48ce10ced1c5c798c7a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:05 -0500 Subject: [PATCH 0167/1789] New translations common-issues.mdx (German) --- .../de/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/de/subgraphs/developing/creating/graph-ts/common-issues.mdx index f8d0c9c004c2..65e8e3d4a8a3 100644 --- a/website/src/pages/de/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: Common AssemblyScript Issues --- -There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From b62355f4e59450477a8dfc8ce514520831cc84bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:06 -0500 Subject: [PATCH 0168/1789] New translations common-issues.mdx (Italian) --- .../it/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/it/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/it/subgraphs/developing/creating/graph-ts/common-issues.mdx index 8d714dad8499..7c21ab8fc43b 100644 --- a/website/src/pages/it/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/it/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: Problemi comuni di AssemblyScript --- -Ci sono alcuni problemi [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) in cui è comune imbattersi durante lo sviluppo di subgraph. La loro difficoltà di debug è variabile, ma conoscerli può essere d'aiuto. Quello che segue è un elenco non esaustivo di questi problemi: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. 
The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - L'ambito non è ereditato nelle [closure functions](https://www.assemblyscript.org/status.html#on-closures), cioè le variabili dichiarate al di fuori delle closure functions non possono essere utilizzate. Spiegazione in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From efba614f7010764f7994025e73a615e7662e0877 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:07 -0500 Subject: [PATCH 0169/1789] New translations common-issues.mdx (Japanese) --- .../ja/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ja/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/ja/subgraphs/developing/creating/graph-ts/common-issues.mdx index 9bb0634b57b3..e7622788c797 100644 --- a/website/src/pages/ja/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/ja/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: AssemblyScriptのよくある問題 --- -AssemblyScript](https://github.com/AssemblyScript/assemblyscript)には、サブグラフの開発中によく遭遇する問題があります。これらの問題は、デバッグの難易度に幅がありますが、認識しておくと役に立つかもしれません。以下は、これらの問題の非網羅的なリストです: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - スコープは[クロージャー関数](https://www.assemblyscript.org/status.html#on-closures)には継承されません。つまり、クロージャー関数の外で宣言された変数は使用できません。Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s)に説明があります。 From 75197a7ab071e4a87d4ee14a85168f9599b1cd3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:08 -0500 Subject: [PATCH 0170/1789] New translations common-issues.mdx (Korean) --- .../ko/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ko/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/ko/subgraphs/developing/creating/graph-ts/common-issues.mdx index f8d0c9c004c2..65e8e3d4a8a3 100644 --- a/website/src/pages/ko/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/ko/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: Common AssemblyScript Issues --- -There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. 
The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From ff191f5ee9405e50660ee14f98aa2bc588328773 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:09 -0500 Subject: [PATCH 0171/1789] New translations common-issues.mdx (Dutch) --- .../nl/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/nl/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/nl/subgraphs/developing/creating/graph-ts/common-issues.mdx index f8d0c9c004c2..65e8e3d4a8a3 100644 --- a/website/src/pages/nl/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/nl/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: Common AssemblyScript Issues --- -There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From fa930610659eb6ec99c8bd9c8bc535f46718f47d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:10 -0500 Subject: [PATCH 0172/1789] New translations common-issues.mdx (Polish) --- .../pl/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pl/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/pl/subgraphs/developing/creating/graph-ts/common-issues.mdx index f8d0c9c004c2..65e8e3d4a8a3 100644 --- a/website/src/pages/pl/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/pl/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: Common AssemblyScript Issues --- -There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. 
The following is a non-exhaustive list of these issues: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 8c855b8ff0803dd607adf41c71f0c2a37dcfb36f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:11 -0500 Subject: [PATCH 0173/1789] New translations common-issues.mdx (Portuguese) --- .../pt/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pt/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/pt/subgraphs/developing/creating/graph-ts/common-issues.mdx index 2f5f5b63c40a..1e68b32fb80f 100644 --- a/website/src/pages/pt/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/pt/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: Problemas Comuns no AssemblyScript --- -É comum encontrar certos problemas no [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) durante o desenvolvimento do subgraph. Eles variam em dificuldade de debug, mas vale ter consciência deles. A seguir, uma lista não exaustiva destes problemas: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - O escopo não é herdado em [funções de closure](https://www.assemblyscript.org/status.html#on-closures), por ex., não é possível usar variáveis declaradas fora de funções de closure. Há uma explicação [neste vídeo](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). 
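The `common-issues.mdx` text being retranslated in the patches above describes two AssemblyScript pitfalls: `private` class variables are not enforced, and closures do not inherit the enclosing scope. The following is a minimal sketch of both, purely illustrative and not part of any patch; the class, function, and variable names are made up.

```typescript
// Illustration only — names are hypothetical.

// Issue 1: the `private` modifier is parsed but not enforced, so nothing stops
// outside code from writing to the field directly.
class Counter {
  private value: i32 = 0
}

let counter = new Counter()
counter.value = 42 // compiles and mutates the "private" field from outside the class

// Issue 2: closures do not inherit the enclosing scope, so an arrow function
// cannot use a local variable declared outside of it.
function sum(values: i32[]): i32 {
  let total: i32 = 0

  // Does not compile in AssemblyScript — `total` would have to be captured:
  // values.forEach((v: i32) => {
  //   total += v
  // })

  // Workaround: keep the logic in the same scope, e.g. a plain loop.
  for (let i = 0; i < values.length; i++) {
    total += values[i]
  }
  return total
}
```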
From a59a621ed9b46044fc049e90d48320a2cc8b621e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:12 -0500 Subject: [PATCH 0174/1789] New translations common-issues.mdx (Russian) --- .../ru/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ru/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/ru/subgraphs/developing/creating/graph-ts/common-issues.mdx index 74f717af91a4..0903710db4bf 100644 --- a/website/src/pages/ru/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/ru/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: Распространенные проблемы с AssemblyScript --- -Существуют определенные проблемы c [AssemblyScript] (https://github.com/AssemblyScript/assemblyscript), с которыми часто приходится сталкиваться при разработке субграфа. Они различаются по сложности отладки, однако знание о них может помочь. Ниже приведен неполный перечень этих проблем: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Область видимости не наследуется [функциями замыкания](https://www.assemblyscript.org/status.html#on-closures), т.е. переменные, объявленные вне функций замыкания, не могут быть использованы. Пояснения см. в [Рекомендациях для разработчиков #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 978060657fc85d98b85530e765d584fc649eafe7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:13 -0500 Subject: [PATCH 0175/1789] New translations common-issues.mdx (Swedish) --- .../sv/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/sv/subgraphs/developing/creating/graph-ts/common-issues.mdx index b1f7b27f220a..dd4d5e876a6a 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: Vanliga problem med AssemblyScript --- -There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. 
variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 2e91af50bd17bc0c9942810c480704e91269e885 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:14 -0500 Subject: [PATCH 0176/1789] New translations common-issues.mdx (Turkish) --- .../tr/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/tr/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/tr/subgraphs/developing/creating/graph-ts/common-issues.mdx index 681a0a3c6b31..ef24d83f4b94 100644 --- a/website/src/pages/tr/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/tr/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: Genel AssemblyScript Sorunları --- -Subgraph geliştirme sırasında karşılaşılması muhtemel bazı [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) sorunları bulunmaktadır. Bu sorunlar, hata ayıklama zorluğuna göre değişiklik gösterse de bunların farkında olmak faydalı olabilir. Aşağıda, bu sorunların kapsamlı olmayan bir listesi verilmiştir: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Kapsam [Closure fonksiyonlarına] aktarılmaz (https://www.assemblyscript.org/status.html#on-closures) kalıtılmaz, yani closure fonksiyonlarının dışında tanımlanan değişkenler bu fonksiyonlar içinde kullanılamaz. Daha fazla açıklama için [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s) videosuna bakabilirsiniz. From 8106af0f4a2281357a6b896ff2977ce4637eb230 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:15 -0500 Subject: [PATCH 0177/1789] New translations common-issues.mdx (Ukrainian) --- .../uk/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/uk/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/uk/subgraphs/developing/creating/graph-ts/common-issues.mdx index f8d0c9c004c2..65e8e3d4a8a3 100644 --- a/website/src/pages/uk/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/uk/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: Common AssemblyScript Issues --- -There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. 
The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 34a5653c2abda9a8feab25e0375be1a22bf317a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:16 -0500 Subject: [PATCH 0178/1789] New translations common-issues.mdx (Chinese Simplified) --- .../zh/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/zh/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/zh/subgraphs/developing/creating/graph-ts/common-issues.mdx index d8625f05baea..3705ceb6a23d 100644 --- a/website/src/pages/zh/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/zh/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: AssemblyScript的常见问题 --- -在子图开发过程中,常常会遇到某些 AssemblyScript 问题。它们在调试难度范围内,但是,意识到它们可能会有所帮助。以下是这些问题的非详尽清单: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From f186a4d7d81ab174c0c9f21688a5387fc599fcb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:16 -0500 Subject: [PATCH 0179/1789] New translations common-issues.mdx (Urdu (Pakistan)) --- .../ur/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ur/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/ur/subgraphs/developing/creating/graph-ts/common-issues.mdx index 4b7eaae1c362..40b245255886 100644 --- a/website/src/pages/ur/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/ur/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: مشترکہ اسمبلی اسکرپٹ کے مسائل --- -There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. 
The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 8a73fe8480230463fe666152301ce54c9377c371 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:17 -0500 Subject: [PATCH 0180/1789] New translations common-issues.mdx (Vietnamese) --- .../vi/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/vi/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/vi/subgraphs/developing/creating/graph-ts/common-issues.mdx index f8d0c9c004c2..65e8e3d4a8a3 100644 --- a/website/src/pages/vi/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/vi/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: Common AssemblyScript Issues --- -There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 0c17948e544e182c092e1a52377ce67d76c8f9f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:18 -0500 Subject: [PATCH 0181/1789] New translations common-issues.mdx (Marathi) --- .../mr/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/mr/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/mr/subgraphs/developing/creating/graph-ts/common-issues.mdx index d291033f3ff0..868eab208423 100644 --- a/website/src/pages/mr/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/mr/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: सामान्य असेंब्लीस्क्रिप्ट समस्या --- -There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. 
The following is a non-exhaustive list of these issues: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 7ec0746149b2ec3458351b2af9e5b618c487c9a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:19 -0500 Subject: [PATCH 0182/1789] New translations common-issues.mdx (Hindi) --- .../hi/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/hi/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/hi/subgraphs/developing/creating/graph-ts/common-issues.mdx index 155469a5960b..348c4824f2c5 100644 --- a/website/src/pages/hi/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/hi/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -2,7 +2,7 @@ title: आम AssemblyScript मुद्दे --- -There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: - `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). 
From c28a6e27a852f20ea6bdc3cf324392e4e472282b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:20 -0500 Subject: [PATCH 0183/1789] New translations common-issues.mdx (Swahili) --- .../developing/creating/graph-ts/common-issues.mdx | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/developing/creating/graph-ts/common-issues.mdx diff --git a/website/src/pages/sw/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/sw/subgraphs/developing/creating/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..65e8e3d4a8a3 --- /dev/null +++ b/website/src/pages/sw/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: Common AssemblyScript Issues +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during Subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 0f066638b7a37783d4a173c37e6f886b97d33b94 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:21 -0500 Subject: [PATCH 0184/1789] New translations unit-testing-framework.mdx (Romanian) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/ro/subgraphs/developing/creating/unit-testing-framework.mdx index 2133c1d4b5c9..fdfd2116b563 100644 --- a/website/src/pages/ro/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/ro/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: Unit Testing Framework --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. 
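The `unit-testing-framework.mdx` text translated below lists "assertions about the store state" among Matchstick's features. As a rough sketch of what that looks like in practice (not part of any patch): it assumes the `matchstick-as` helpers `test`, `assert.fieldEquals`, and `clearStore`, and uses a hypothetical `ExampleEntity` with an `amount` field standing in for a generated schema class.

```typescript
// Illustrative sketch; entity and field names are hypothetical.
import { assert, clearStore, test } from 'matchstick-as/assembly/index'
import { BigInt } from '@graphprotocol/graph-ts'
import { ExampleEntity } from '../generated/schema'

test('saves an entity and asserts on the sandboxed store', () => {
  // Arrange: do what a mapping handler would do — create and save an entity.
  let entity = new ExampleEntity('entity-1')
  entity.amount = BigInt.fromI32(42)
  entity.save()

  // Assert: field values are compared as strings against the sandboxed store.
  assert.fieldEquals('ExampleEntity', 'entity-1', 'amount', '42')

  // Clean up so later tests start from an empty store.
  clearStore()
})
```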
## Getting Started @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### CLI options @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. -h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Demo subgraph +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Video tutorials -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im There we go - we've created our first test! 👏 -Now in order to run our tests you simply need to run the following in your subgraph root folder: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## Test Coverage -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1395,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## Additional Resources -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). 
+For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). ## Feedback From 4dfd2d0e6bc636c751bccdaf9b3316e3078da553 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:23 -0500 Subject: [PATCH 0185/1789] New translations unit-testing-framework.mdx (French) --- .../creating/unit-testing-framework.mdx | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/fr/subgraphs/developing/creating/unit-testing-framework.mdx index 4ba4ab8d4111..ba0e793cb96c 100644 --- a/website/src/pages/fr/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/fr/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: Cadre pour les tests unitaires --- -Apprenez à utiliser Matchstick, un framework de test unitaire développé par [LimeChain](https://limechain.tech/). Matchstick permet aux développeurs de subgraphs de tester leur logique de mappages dans un environnement sandbox et de déployer avec succès leurs subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Avantages de l'utilisation de Matchstick - Il est écrit en Rust et optimisé pour des hautes performances. -- Il vous donne accès à des fonctionnalités pour développeurs, y compris la possibilité de simuler des appels de contrat, de faire des assertions sur l'état du store, de surveiller les échecs de subgraph, de vérifier les performances des tests, et bien plus encore. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## Introduction @@ -87,7 +87,7 @@ Et enfin, n'utilisez pas `graph test` (qui utilise votre installation globale de ### En utilisant Matchstick -Pour utiliser **Matchstick** dans votre projet de ssubgraph, ouvrez un terminal, naviguez jusqu'au dossier racine de votre projet et exécutez simplement `graph test [options] ` - il télécharge le dernier binaire **Matchstick** et exécute le test spécifié ou tous les tests dans un dossier de test (ou tous les tests existants si aucun flag de source de données n'est spécifié). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### CLI options @@ -112,13 +112,13 @@ graph test path/to/file.test.ts **Options:** ```sh --c, --coverage Exécuter les tests en mode couverture --d, --docker Exécuter les tests dans un conteneur docker (Note : Veuillez exécuter à partir du dossier racine du subgraph) --f, --force Binaire : Retélécharge le binaire. Docker : Retélécharge le fichier Docker et reconstruit l'image Docker. --h, --help Affiche les informations d'utilisation --l, --logs Enregistre dans la console des informations sur le système d'exploitation, le modèle de processeur et l'URL de téléchargement (à des fins de débogage). 
--r, --recompile Force les tests à être recompilés --v, --version Choisissez la version du binaire rust que vous souhaitez télécharger/utiliser +-c, --coverage Run the tests in coverage mode +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) +-f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. +-h, --help Show usage information +-l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) +-r, --recompile Forces tests to be recompiled +-v, --version Choose the version of the rust binary that you want to be downloaded/used ``` ### Docker @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Subgraph démonstration +### Demo Subgraph Vous pouvez essayer et jouer avec les exemples de ce guide en clonant le [dépôt du Demo Subgraph.](https://github.com/LimeChain/demo-subgraph) ### Tutoriels vidéos -Vous pouvez également consulter la série de vidéos sur [" Comment utiliser Matchstick pour écrire des tests unitaires pour vos subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Structure des tests @@ -662,7 +662,7 @@ Cela fait beaucoup à décortiquer ! Tout d'abord, une chose importante à noter Et voilà, nous avons formulé notre premier test ! 👏 -Maintenant, afin d'exécuter nos tests, il suffit d'exécuter ce qui suit dans le dossier racine de votre subgraph : +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1164,7 +1164,7 @@ De même que pour les sources de données dynamiques de contrat, les utilisateur ##### Exemple `subgraph.yaml` ```yaml ---- +... templates: - kind: file/ipfs name: GraphTokenLockMetadata @@ -1216,8 +1216,8 @@ type TokenLockMetadata @entity { ##### Exemple de gestionnaire ```typescript -export function handleMetadata(content: Bytes): void { - // dataSource.stringParams() renvoie le CID du fichier de la source de données +export function handleMetadata(content : Bytes) : void { + // dataSource.stringParams() renvoie le CID du fichier de la source de données // stringParam() sera simulé dans le test du gestionnaire // pour plus d'informations https://thegraph.com/docs/en/developing/creating-a-subgraph/#create-a-new-handler-to-process-files let tokenMetadata = new TokenLockMetadata(dataSource.stringParam()) @@ -1289,7 +1289,7 @@ test('exemple de création d'une dataSource file/ipfs', () => { ## Couverture de test -En utilisant **Matchstick**, les développeurs de subgraphs peuvent exécuter un script qui calculera la couverture des tests unitaires écrits. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. L'outil de couverture des tests prend les binaires de test compilés `wasm` et les convertit en fichiers `wat`, qui peuvent alors être facilement inspectés pour voir si les gestionnaires définis dans `subgraph.yaml` ont été appelés ou non. Comme la couverture du code (et les tests dans leur ensemble) n'en est qu'à ses débuts en AssemblyScript et WebAssembly, **Matchstick** ne peut pas vérifier la couverture des branches. 
Au lieu de cela, nous nous appuyons sur l'affirmation que si un gestionnaire donné a été appelé, l'événement/la fonction correspondant(e) a été correctement simulé(e). @@ -1395,7 +1395,7 @@ La non-concordance des arguments est causée par la non-concordance de `graph-ts ## Ressources supplémentaires -Pour toute aide supplémentaire, consultez cette [démo de subgraph utilisant Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). ## Réaction From f25db911703cde4611fd002bd977784edff42efc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:24 -0500 Subject: [PATCH 0186/1789] New translations unit-testing-framework.mdx (Spanish) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/es/subgraphs/developing/creating/unit-testing-framework.mdx index a9ab2a9ef384..51ec256da29d 100644 --- a/website/src/pages/es/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/es/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: Marco de Unit Testing --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## Empezando @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### Opciones CLI @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. 
Docker: Redownloads the Dockerfile and rebuilds the docker image. -h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Subgrafo de demostración +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Tutoriales en vídeo -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im Ahí vamos: ¡hemos creado nuestra primera prueba! 👏 -Ahora, para ejecutar nuestras pruebas, simplemente necesitas ejecutar lo siguiente en la carpeta raíz de tu subgrafo: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## Cobertura de prueba -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1395,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## Recursos Adicionales -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). ## Comentario From 14a203495dd59cb519a1e2e0e6d164577efc2da8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:25 -0500 Subject: [PATCH 0187/1789] New translations unit-testing-framework.mdx (Arabic) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/ar/subgraphs/developing/creating/unit-testing-framework.mdx index e72d68bef7c8..73cd0aaf4222 100644 --- a/website/src/pages/ar/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/ar/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: اختبار وحدة Framework --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). 
Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## Getting Started @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### CLI options @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. -h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Demo subgraph +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Video tutorials -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im There we go - we've created our first test! 👏 -Now in order to run our tests you simply need to run the following in your subgraph root folder: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## Test Coverage -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. 
+Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1395,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## مصادر إضافية -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). ## Feedback From 41700e6d23cce801846d63610be76eaab70223ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:26 -0500 Subject: [PATCH 0188/1789] New translations unit-testing-framework.mdx (Czech) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/cs/subgraphs/developing/creating/unit-testing-framework.mdx index fd0130dd672a..f970696fb636 100644 --- a/website/src/pages/cs/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/cs/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: Rámec pro testování jednotek --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## Začínáme @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). 
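The paragraph above (from the page being translated) explains that `graph test [options] <datasource>` runs the tests found in the configured tests folder. As a rough sketch of the shape such a test file can take — not part of any patch — the following assumes the `matchstick-as` helpers `describe`, `test`, `beforeEach`, `clearStore`, `newMockEvent`, and `createMockedFunction`; the function signature, handler, and event names are hypothetical.

```typescript
// Illustrative sketch; handler, event, and contract function names are hypothetical.
import {
  beforeEach,
  clearStore,
  createMockedFunction,
  describe,
  newMockEvent,
  test,
} from 'matchstick-as/assembly/index'
import { BigInt, ethereum } from '@graphprotocol/graph-ts'

describe('Gravity datasource', () => {
  beforeEach(() => {
    clearStore() // every test starts from an empty sandboxed store
  })

  test('prepares a mocked event and a mocked contract call', () => {
    // A generic event that a mapping handler could be called with.
    let mockEvent = newMockEvent()
    mockEvent.parameters = [
      new ethereum.EventParam('id', ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(1))),
    ]

    // Any eth_call the handler makes to this address/function gets the mocked value.
    createMockedFunction(mockEvent.address, 'getGravatar', 'getGravatar(uint256):(string)')
      .withArgs([ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(1))])
      .returns([ethereum.Value.fromString('mocked display name')])

    // A real test would now invoke the handler, e.g.
    //   handleNewGravatar(changetype<NewGravatar>(mockEvent))  // hypothetical names
    // and then assert on the resulting store state as shown earlier on this page.
  })
})
```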
### Možnosti CLI @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. -h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Ukázkový podgraf +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Videonávody -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im A je to tady - vytvořili jsme první test! 👏 -Pro spuštění našich testů nyní stačí v kořenové složce podgrafu spustit následující příkaz: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## Pokrytí test -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1395,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## Další zdroje -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). 
## Zpětná vazba From b2efb9ba90d233c4ba3eeea3886450ceabf12101 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:27 -0500 Subject: [PATCH 0189/1789] New translations unit-testing-framework.mdx (German) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/de/subgraphs/developing/creating/unit-testing-framework.mdx index 52f7cc2134b8..2cafbea426d4 100644 --- a/website/src/pages/de/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: Rahmen für Einheitstests --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## Erste Schritte @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### CLI-Optionen @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. 
-h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Demo-Subgraph +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Video-Tutorials -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Struktur der Tests @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im There we go - we've created our first test! 👏 -Now in order to run our tests you simply need to run the following in your subgraph root folder: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## Test Coverage -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1395,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## Zusätzliche Ressourcen -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). ## Feedback From c6646990372be2ce2557e4897dd274e577c403e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:29 -0500 Subject: [PATCH 0190/1789] New translations unit-testing-framework.mdx (Italian) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/it/subgraphs/developing/creating/unit-testing-framework.mdx index 77496e8eb092..541206638ad0 100644 --- a/website/src/pages/it/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/it/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: Unit Testing Framework --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. 
+Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## Per cominciare @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### CLI options @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. -h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Demo subgraph +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Video tutorials -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im There we go - we've created our first test! 👏 -Now in order to run our tests you simply need to run the following in your subgraph root folder: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## Test Coverage -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. 
The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1395,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## Additional Resources -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). ## Feedback From 0694e52acc267b4967c9dc8ba504e407df0f6b04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:30 -0500 Subject: [PATCH 0191/1789] New translations unit-testing-framework.mdx (Japanese) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/ja/subgraphs/developing/creating/unit-testing-framework.mdx index 5a089a93aa50..ef988c505ca7 100644 --- a/website/src/pages/ja/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/ja/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: ユニットテストフレームワーク --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## はじめに @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). 
### CLI options @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. -h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### デモ・サブグラフ +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### ビデオチュートリアル -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im これで最初のテストが完成しました! 👏 -テストを実行するには、サブグラフのルートフォルダで以下を実行する必要があります: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## テストカバレッジ -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1395,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## その他のリソース -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). 
## フィードバック From cd3f1fab518d4f21d1a0ca6ad52ddb541d5b6b84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:31 -0500 Subject: [PATCH 0192/1789] New translations unit-testing-framework.mdx (Korean) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/ko/subgraphs/developing/creating/unit-testing-framework.mdx index 2133c1d4b5c9..fdfd2116b563 100644 --- a/website/src/pages/ko/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/ko/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: Unit Testing Framework --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## Getting Started @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### CLI options @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. 
-h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Demo subgraph +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Video tutorials -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im There we go - we've created our first test! 👏 -Now in order to run our tests you simply need to run the following in your subgraph root folder: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## Test Coverage -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1395,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## Additional Resources -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). ## Feedback From 122c195089400b44a12d63c8b9637b77b4a20b6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:32 -0500 Subject: [PATCH 0193/1789] New translations unit-testing-framework.mdx (Dutch) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/nl/subgraphs/developing/creating/unit-testing-framework.mdx index 2133c1d4b5c9..fdfd2116b563 100644 --- a/website/src/pages/nl/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/nl/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: Unit Testing Framework --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. 
+Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## Getting Started @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### CLI options @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. -h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Demo subgraph +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Video tutorials -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im There we go - we've created our first test! 👏 -Now in order to run our tests you simply need to run the following in your subgraph root folder: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## Test Coverage -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. 
The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1395,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## Additional Resources -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). ## Feedback From fe76e78544ee7b7681d8e3c05b2728821845a7bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:33 -0500 Subject: [PATCH 0194/1789] New translations unit-testing-framework.mdx (Polish) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/pl/subgraphs/developing/creating/unit-testing-framework.mdx index 2133c1d4b5c9..fdfd2116b563 100644 --- a/website/src/pages/pl/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/pl/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: Unit Testing Framework --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## Getting Started @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). 
### CLI options @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. -h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Demo subgraph +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Video tutorials -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im There we go - we've created our first test! 👏 -Now in order to run our tests you simply need to run the following in your subgraph root folder: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## Test Coverage -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1395,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## Additional Resources -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). 
## Feedback From 41f485f523169214b05af9688dcb5d921ba572a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:34 -0500 Subject: [PATCH 0195/1789] New translations unit-testing-framework.mdx (Portuguese) --- .../creating/unit-testing-framework.mdx | 34 +++++++------------ 1 file changed, 12 insertions(+), 22 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/pt/subgraphs/developing/creating/unit-testing-framework.mdx index 0b92f77c0f4f..efdeed225ffb 100644 --- a/website/src/pages/pt/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/pt/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: Estrutura de Testes de Unidades --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## Como Começar @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### Opções de CLI @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. 
-h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Subgraph de demonstração +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Tutoriais de vídeo -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Estrutura de testes @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im Prontinho — criamos o nosso primeiro teste! 👏 -Para executar os nossos testes, basta apenas executar o seguinte na pasta raiz do seu subgraph: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## Cobertura de Testes -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1375,29 +1375,19 @@ A saída do log inclui a duração do teste. Veja um exemplo: ## Erros comuns do compilador -> -> Critical: Could not create WasmInstance from valid module with context: unknown import: -> wasi_snapshot_preview1::fd_write has not been defined -> +> Critical: Could not create WasmInstance from valid module with context: unknown import: wasi_snapshot_preview1::fd_write has not been defined This means you have used `console.log` in your code, which is not supported by AssemblyScript. Please consider using the [Logging API](/subgraphs/developing/creating/graph-ts/api/#logging-api) > ERROR TS2554: Expected ? arguments, but got ?. > -> -> return new ethereum.Block(defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultAddress, -> defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultBigInt, defaultBigInt, defaultBigInt, -> defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt); -> +> return new ethereum.Block(defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultAddress, defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt); > > in ~lib/matchstick-as/assembly/defaults.ts(18,12) > > ERROR TS2554: Expected ? arguments, but got ?. 
> -> -> return new ethereum.Transaction(defaultAddressBytes, defaultBigInt, defaultAddress, defaultAddress, defaultBigInt, -> defaultBigInt, defaultBigInt, defaultAddressBytes, defaultBigInt); -> +> return new ethereum.Transaction(defaultAddressBytes, defaultBigInt, defaultAddress, defaultAddress, defaultBigInt, defaultBigInt, defaultBigInt, defaultAddressBytes, defaultBigInt); > > in ~lib/matchstick-as/assembly/defaults.ts(24,12) @@ -1405,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## Outros Recursos -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). ## Feedback From 55d4c0c72172c20dae7b8c484ceb7bc0f6f54e26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:36 -0500 Subject: [PATCH 0196/1789] New translations unit-testing-framework.mdx (Russian) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/ru/subgraphs/developing/creating/unit-testing-framework.mdx index a747fd939efb..bb73475156b1 100644 --- a/website/src/pages/ru/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/ru/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: Фреймворк модульного тестирования --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## Начало работы @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). 
### Параметры CLI @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. -h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Демонстрационный субграф +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Видеоуроки -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Структура тестов @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im Вот и все - мы создали наш первый тест! 👏 -Теперь, чтобы запустить наши тесты, Вам просто нужно запустить в корневой папке своего субграфа следующее: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## Тестовое покрытие -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1395,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## Дополнительные ресурсы -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). 
## Обратная связь From 6e8231e5461495b9d29c1c2be64197ddacfddb12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:37 -0500 Subject: [PATCH 0197/1789] New translations unit-testing-framework.mdx (Swedish) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/sv/subgraphs/developing/creating/unit-testing-framework.mdx index 49aea6a7f4da..436c62716d9f 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: Enhetsprovningsramverk --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## Komma igång @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### CLI alternativ @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. 
-h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Demo undergraf +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Handledning för video -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im Så där har vi skapat vårt första test! 👏 -För att köra våra tester behöver du helt enkelt köra följande i din subgrafs rotmapp: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## Testtäckning -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1395,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## Ytterligare resurser -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). ## Respons From 6c0ebf343b5dd6c6ee25272b00de16331dc62bfa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:38 -0500 Subject: [PATCH 0198/1789] New translations unit-testing-framework.mdx (Turkish) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/tr/subgraphs/developing/creating/unit-testing-framework.mdx index fe203de9b520..05a52f58790b 100644 --- a/website/src/pages/tr/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/tr/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: Birim Testi Framework'ü --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). 
Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## Buradan Başlayın @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### CLI seçenekleri @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. -h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Demo subgraph +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Öğretici videolar -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Test yapısı @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im İşte başardın - ilk testimizi oluşturduk! 👏 -Şimdi testlerimizi çalıştırmak için subgraph kök klasörünüzde şunu çalıştırmanız yeterlidir: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## Test Kapsamı -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. 
The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1395,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## Ek Kaynaklar -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). ## Geribildirim From 7e585748ba14691abf823c8ec5b58fdc591f73ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:39 -0500 Subject: [PATCH 0199/1789] New translations unit-testing-framework.mdx (Ukrainian) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/uk/subgraphs/developing/creating/unit-testing-framework.mdx index 78df2c601459..fc63d72aaf35 100644 --- a/website/src/pages/uk/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/uk/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: Unit Testing Framework --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## Getting Started @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). 
### CLI options @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. -h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Demo subgraph +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Video tutorials -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im There we go - we've created our first test! 👏 -Now in order to run our tests you simply need to run the following in your subgraph root folder: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## Test Coverage -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1395,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## Додаткові матеріали -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). 
## Feedback From a5b24071b52005c5236ef2d0fbe20e8c102f1fd5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:40 -0500 Subject: [PATCH 0200/1789] New translations unit-testing-framework.mdx (Chinese Simplified) --- .../creating/unit-testing-framework.mdx | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/zh/subgraphs/developing/creating/unit-testing-framework.mdx index fb9703c0fdff..352dffd32bbc 100644 --- a/website/src/pages/zh/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/zh/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: 单元测试框架 --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## 开始 @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### CLI 选项 @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. 
-h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### 演示子图 +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### 视频教程 -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im 好了,我们创建了第一个测试!👏 -现在,为了运行我们的测试,您只需在子图根文件夹中运行以下命令: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## 测试覆盖率 -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1393,9 +1393,9 @@ This means you have used `console.log` in your code, which is not supported by A The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version. -## 其他资源 +## Additional Resources -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). ## 反馈 From ab2c9f274d8b52e81e64cec54ee3a8e72ab48f2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:42 -0500 Subject: [PATCH 0201/1789] New translations unit-testing-framework.mdx (Urdu (Pakistan)) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/ur/subgraphs/developing/creating/unit-testing-framework.mdx index ba6feb650a07..022454430767 100644 --- a/website/src/pages/ur/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/ur/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: یونٹ ٹیسٹنگ فریم ورک --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. 
+Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## شروع ہوا چاہتا ہے @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### CLI کے اختیارات @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. -h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### ڈیمو سب گراف +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### ویڈیو ٹیوٹوریلز -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im ہم وہاں جاتے ہیں - ہم نے اپنا پہلا ٹیسٹ بنایا ہے! 👏 -اب ہمارے ٹیسٹ چلانے کے لیے آپ کو اپنے سب گراف روٹ فولڈر میں درج ذیل کو چلانے کی ضرورت ہے: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## ٹیسٹ کوریج -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. 
The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1395,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## اضافی وسائل -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). ## تاثرات From 4ac4df067ff31bafff69e1cf83dd51bc46050ee2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:43 -0500 Subject: [PATCH 0202/1789] New translations unit-testing-framework.mdx (Vietnamese) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/vi/subgraphs/developing/creating/unit-testing-framework.mdx index 10a1078a2eb5..6a7e481cd1ff 100644 --- a/website/src/pages/vi/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/vi/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: Unit Testing Framework --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## Getting Started @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). 
### CLI options @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. -h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Demo subgraph +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Video tutorials -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im There we go - we've created our first test! 👏 -Now in order to run our tests you simply need to run the following in your subgraph root folder: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## Test Coverage -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1395,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## Additional Resources -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). 
## Feedback From 2c6a6479d251a0535830a2e4c762a688d4e51ccd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:44 -0500 Subject: [PATCH 0203/1789] New translations unit-testing-framework.mdx (Marathi) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/mr/subgraphs/developing/creating/unit-testing-framework.mdx index e09a384b8e6d..b0f6385bff6a 100644 --- a/website/src/pages/mr/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/mr/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,12 +2,12 @@ title: युनिट चाचणी फ्रेमवर्क --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## प्रारंभ करणे @@ -87,7 +87,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### CLI पर्याय @@ -113,7 +113,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. 
-h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -145,13 +145,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### डेमो सबग्राफ +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### व्हिडिओ ट्यूटोरियल -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -662,7 +662,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im आम्ही तिथे जातो - आम्ही आमची पहिली चाचणी तयार केली आहे! 👏 -आता आमच्या चाचण्या चालवण्यासाठी तुम्हाला तुमच्या सबग्राफ रूट फोल्डरमध्ये खालील गोष्टी चालवाव्या लागतील: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1289,7 +1289,7 @@ test('file/ipfs dataSource creation example', () => { ## चाचणी कव्हरेज -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1395,7 +1395,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## अतिरिक्त संसाधने -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). ## अभिप्राय From 9cfc98cc3db4830ab5fa880032fa4ff473e30b03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:45 -0500 Subject: [PATCH 0204/1789] New translations unit-testing-framework.mdx (Hindi) --- .../creating/unit-testing-framework.mdx | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/hi/subgraphs/developing/creating/unit-testing-framework.mdx index 89a802802610..7a8c31ee6eb5 100644 --- a/website/src/pages/hi/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/hi/subgraphs/developing/creating/unit-testing-framework.mdx @@ -4,12 +4,12 @@ title: |- कला --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. 
+Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. ## Benefits of Using Matchstick - यह Rust में लिखा गया है और उच्च प्रदर्शन के लिए अनुकूलित है। -- यह आपको डेवलपर विशेषता तक पहुंच प्रदान करता है, जिसमें contract कॉल्स को मॉक करने, स्टोर स्टेट के बारे में एसेर्शन करने, सबग्राफ विफलताओं की निगरानी करने, टेस्ट परफॉर्मेंस जांचने और बहुत कुछ करने की क्षमता शामिल है। +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. ## शुरू करना @@ -35,7 +35,7 @@ yarn add --dev matchstick-as brew install postgresql ``` -यहां तक कि नवीनतम libpq.5.lib\_ का एक symlink बनाएं। आपको पहले यह dir बनाने की आवश्यकता हो सकती है: `/usr/local/opt/postgresql/lib/` +यहां तक कि नवीनतम libpq.5.lib_ का एक symlink बनाएं। आपको पहले यह dir बनाने की आवश्यकता हो सकती है: `/usr/local/opt/postgresql/lib/` ```sh ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib @@ -89,7 +89,7 @@ And finally, do not use `graph test` (which uses your global installation of gra ### Using Matchstick -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### सीएलआई विकल्प @@ -115,7 +115,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) -f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. -h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) @@ -147,13 +147,13 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### डेमो सबग्राफ +### Demo Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### वीडियो शिक्षण -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -664,7 +664,7 @@ That's a lot to unpack! First off, an important thing to notice is that we're im ये रहा - हमने अपना पहला परीक्षण बना लिया है! 
👏 -अब हमारे परीक्षण चलाने के लिए आपको बस अपने सबग्राफ रूट फ़ोल्डर में निम्नलिखित को चलाने की आवश्यकता है: +Now in order to run our tests you simply need to run the following in your Subgraph root folder: `graph test Gravity` @@ -1291,7 +1291,7 @@ test('file/ipfs dataSource creation example', () => { ## टेस्ट कवरेज -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1397,7 +1397,7 @@ The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as ## Additional Resources -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). ## प्रतिक्रिया From e9dd037ff77f8acfa7978319be8fb764f83ff0fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:46 -0500 Subject: [PATCH 0205/1789] New translations unit-testing-framework.mdx (Swahili) --- .../creating/unit-testing-framework.mdx | 1402 +++++++++++++++++ 1 file changed, 1402 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/developing/creating/unit-testing-framework.mdx diff --git a/website/src/pages/sw/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/sw/subgraphs/developing/creating/unit-testing-framework.mdx new file mode 100644 index 000000000000..fdfd2116b563 --- /dev/null +++ b/website/src/pages/sw/subgraphs/developing/creating/unit-testing-framework.mdx @@ -0,0 +1,1402 @@ +--- +title: Unit Testing Framework +--- + +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables Subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their Subgraphs. + +## Benefits of Using Matchstick + +- It's written in Rust and optimized for high performance. +- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor Subgraph failures, check test performance, and many more. + +## Getting Started + +### Install Dependencies + +In order to use the test helper methods and run tests, you need to install the following dependencies: + +```sh +yarn add --dev matchstick-as +``` + +### Install PostgreSQL + +`graph-node` depends on PostgreSQL, so if you don't already have it, then you will need to install it. + +> Note: It's highly recommended to use the commands below to avoid unexpected errors. 
+ +#### Using MacOS + +Installation command: + +```sh +brew install postgresql +``` + +Create a symlink to the latest libpq.5.lib _You may need to create this dir first_ `/usr/local/opt/postgresql/lib/` + +```sh +ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib +``` + +#### Using Linux + +Installation command (depends on your distro): + +```sh +sudo apt install postgresql +``` + +### Using WSL (Windows Subsystem for Linux) + +You can use Matchstick on WSL both using the Docker approach and the binary approach. As WSL can be a bit tricky, here's a few tips in case you encounter issues like + +``` +static BYTES = Symbol("Bytes") SyntaxError: Unexpected token = +``` + +or + +``` +/node_modules/gluegun/build/index.js:13 throw up; +``` + +Please make sure you're on a newer version of Node.js graph-cli doesn't support **v10.19.0** anymore, and that is still the default version for new Ubuntu images on WSL. For instance Matchstick is confirmed to be working on WSL with **v18.1.0**, you can switch to it either via **nvm** or if you update your global Node.js. Don't forget to delete `node_modules` and to run `npm install` again after updating you nodejs! Then, make sure you have **libpq** installed, you can do that by running + +``` +sudo apt-get install libpq-dev +``` + +And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason that looks like it's broken on WSL currently), instead use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm). For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as + +```json +{ + "name": "demo-subgraph", + "version": "0.1.0", + "scripts": { + "test": "graph test", + ... + }, + "dependencies": { + "@graphprotocol/graph-cli": "^0.56.0", + "@graphprotocol/graph-ts": "^0.31.0", + "matchstick-as": "^0.6.0" + } +} +``` + +### Using Matchstick + +To use **Matchstick** in your Subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). + +### CLI options + +This will run all tests in the test folder: + +```sh +graph test +``` + +This will run a test named gravity.test.ts and/or all test inside of a folder named gravity: + +```sh +graph test gravity +``` + +This will run only that specific test file: + +```sh +graph test path/to/file.test.ts +``` + +**Options:** + +```sh +-c, --coverage Run the tests in coverage mode +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the Subgraph) +-f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. +-h, --help Show usage information +-l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) +-r, --recompile Forces tests to be recompiled +-v, --version Choose the version of the rust binary that you want to be downloaded/used +``` + +### Docker + +From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` in a docker container with the `-d` flag. 
The docker implementation uses [bind mount](https://docs.docker.com/storage/bind-mounts/) so it does not have to rebuild the docker image every time the `graph test -d` command is executed. Alternatively you can follow the instructions from the [matchstick](https://github.com/LimeChain/matchstick#docker-) repository to run docker manually. + +❗ `graph test -d` forces `docker run` to run with flag `-t`. This must be removed to run inside non-interactive environments (like GitHub CI). + +❗ If you have previously ran `graph test` you may encounter the following error during docker build: + +```sh + error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied +``` + +In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin` + +### Configuration + +Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file: + +```yaml +testsFolder: path/to/tests +libsFolder: path/to/libs +manifestPath: path/to/subgraph.yaml +``` + +### Demo Subgraph + +You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) + +### Video tutorials + +Also you can check out the video series on ["How to use Matchstick to write unit tests for your Subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) + +## Tests structure + +_**IMPORTANT: The test structure described below depens on `matchstick-as` version >=0.5.0**_ + +### describe() + +`describe(name: String , () => {})` - Defines a test group. + +**_Notes:_** + +- _Describes are not mandatory. You can still use test() the old way, outside of the describe() blocks_ + +Example: + +```typescript +import { describe, test } from "matchstick-as/assembly/index" +import { handleNewGravatar } from "../../src/gravity" + +describe("handleNewGravatar()", () => { + test("Should create a new Gravatar entity", () => { + ... + }) +}) +``` + +Nested `describe()` example: + +```typescript +import { describe, test } from "matchstick-as/assembly/index" +import { handleUpdatedGravatar } from "../../src/gravity" + +describe("handleUpdatedGravatar()", () => { + describe("When entity exists", () => { + test("updates the entity", () => { + ... + }) + }) + + describe("When entity does not exists", () => { + test("it creates a new entity", () => { + ... + }) + }) +}) +``` + +--- + +### test() + +`test(name: String, () =>, should_fail: bool)` - Defines a test case. You can use test() inside of describe() blocks or independently. + +Example: + +```typescript +import { describe, test } from "matchstick-as/assembly/index" +import { handleNewGravatar } from "../../src/gravity" + +describe("handleNewGravatar()", () => { + test("Should create a new Entity", () => { + ... + }) +}) +``` + +or + +```typescript +test("handleNewGravatar() should create a new entity", () => { + ... +}) + + +``` + +--- + +### beforeAll() + +Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block. + +Examples: + +Code inside `beforeAll` will execute once before _all_ tests in the file. 
+ +```typescript +import { describe, test, beforeAll } from "matchstick-as/assembly/index" +import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" +import { Gravatar } from "../../generated/schema" + +beforeAll(() => { + let gravatar = new Gravatar("0x0") + gravatar.displayName = “First Gravatar” + gravatar.save() + ... +}) + +describe("When the entity does not exist", () => { + test("it should create a new Gravatar with id 0x1", () => { + ... + }) +}) + +describe("When entity already exists", () => { + test("it should update the Gravatar with id 0x0", () => { + ... + }) +}) +``` + +Code inside `beforeAll` will execute once before all tests in the first describe block + +```typescript +import { describe, test, beforeAll } from "matchstick-as/assembly/index" +import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" +import { Gravatar } from "../../generated/schema" + +describe("handleUpdatedGravatar()", () => { + beforeAll(() => { + let gravatar = new Gravatar("0x0") + gravatar.displayName = “First Gravatar” + gravatar.save() + ... + }) + + test("updates Gravatar with id 0x0", () => { + ... + }) + + test("creates new Gravatar with id 0x1", () => { + ... + }) +}) +``` + +--- + +### afterAll() + +Runs a code block after all of the tests in the file. If `afterAll` is declared inside of a `describe` block, it runs at the end of that `describe` block. + +Example: + +Code inside `afterAll` will execute once after _all_ tests in the file. + +```typescript +import { describe, test, afterAll } from "matchstick-as/assembly/index" +import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" +import { store } from "@graphprotocol/graph-ts" + +afterAll(() => { + store.remove("Gravatar", "0x0") + ... +}) + +describe("handleNewGravatar, () => { + test("creates Gravatar with id 0x0", () => { + ... + }) +}) + +describe("handleUpdatedGravatar", () => { + test("updates Gravatar with id 0x0", () => { + ... + }) +}) +``` + +Code inside `afterAll` will execute once after all tests in the first describe block + +```typescript +import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index" +import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" + +describe("handleNewGravatar", () => { + afterAll(() => { + store.remove("Gravatar", "0x1") + ... + }) + + test("It creates a new entity with Id 0x0", () => { + ... + }) + + test("It creates a new entity with Id 0x1", () => { + ... + }) +}) + +describe("handleUpdatedGravatar", () => { + test("updates Gravatar with id 0x0", () => { + ... + }) +}) +``` + +--- + +### beforeEach() + +Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block. + +Examples: Code inside `beforeEach` will execute before each tests. + +```typescript +import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" +import { handleNewGravatars } from "./utils" + +beforeEach(() => { + clearStore() // <-- clear the store before each test in the file +}) + +describe("handleNewGravatars, () => { + test("A test that requires a clean store", () => { + ... + }) + + test("Second that requires a clean store", () => { + ... + }) +}) + + ... 
+``` + +Code inside `beforeEach` will execute only before each test in the that describe + +```typescript +import { describe, test, beforeEach } from 'matchstick-as/assembly/index' +import { handleUpdatedGravatar, handleNewGravatar } from '../../src/gravity' + +describe('handleUpdatedGravatars', () => { + beforeEach(() => { + let gravatar = new Gravatar('0x0') + gravatar.displayName = 'First Gravatar' + gravatar.imageUrl = '' + gravatar.save() + }) + + test('Updates the displayName', () => { + assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') + + // code that should update the displayName to 1st Gravatar + + assert.fieldEquals('Gravatar', '0x0', 'displayName', '1st Gravatar') + store.remove('Gravatar', '0x0') + }) + + test('Updates the imageUrl', () => { + assert.fieldEquals('Gravatar', '0x0', 'imageUrl', '') + + // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 + + assert.fieldEquals('Gravatar', '0x0', 'imageUrl', 'https://www.gravatar.com/avatar/0x0') + store.remove('Gravatar', '0x0') + }) +}) +``` + +--- + +### afterEach() + +Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. + +Examples: + +Code inside `afterEach` will execute after every test. + +```typescript +import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" +import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" + +beforeEach(() => { + let gravatar = new Gravatar("0x0") + gravatar.displayName = “First Gravatar” + gravatar.save() +}) + +afterEach(() => { + store.remove("Gravatar", "0x0") +}) + +describe("handleNewGravatar", () => { + ... +}) + +describe("handleUpdatedGravatar", () => { + test("Updates the displayName", () => { + assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") + + // code that should update the displayName to 1st Gravatar + + assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") + }) + + test("Updates the imageUrl", () => { + assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") + + // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 + + assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") + }) +}) +``` + +Code inside `afterEach` will execute after each test in that describe + +```typescript +import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" +import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" + +describe("handleNewGravatar", () => { + ... 
+}) + +describe("handleUpdatedGravatar", () => { + beforeEach(() => { + let gravatar = new Gravatar("0x0") + gravatar.displayName = "First Gravatar" + gravatar.imageUrl = "" + gravatar.save() + }) + + afterEach(() => { + store.remove("Gravatar", "0x0") + }) + + test("Updates the displayName", () => { + assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") + + // code that should update the displayName to 1st Gravatar + + assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") + }) + + test("Updates the imageUrl", () => { + assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") + + // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 + + assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") + }) +}) +``` + +## Asserts + +```typescript +fieldEquals(entityType: string, id: string, fieldName: string, expectedVal: string) + +equals(expected: ethereum.Value, actual: ethereum.Value) + +notInStore(entityType: string, id: string) + +addressEquals(address1: Address, address2: Address) + +bytesEquals(bytes1: Bytes, bytes2: Bytes) + +i32Equals(number1: i32, number2: i32) + +bigIntEquals(bigInt1: BigInt, bigInt2: BigInt) + +booleanEquals(bool1: boolean, bool2: boolean) + +stringEquals(string1: string, string2: string) + +arrayEquals(array1: Array, array2: Array) + +tupleEquals(tuple1: ethereum.Tuple, tuple2: ethereum.Tuple) + +assertTrue(value: boolean) + +assertNull(value: T) + +assertNotNull(value: T) + +entityCount(entityType: string, expectedCount: i32) +``` + +As of version 0.6.0, asserts support custom error messages as well + +```typescript +assert.fieldEquals('Gravatar', '0x123', 'id', '0x123', 'Id should be 0x123') +assert.equals(ethereum.Value.fromI32(1), ethereum.Value.fromI32(1), 'Value should equal 1') +assert.notInStore('Gravatar', '0x124', 'Gravatar should not be in store') +assert.addressEquals(Address.zero(), Address.zero(), 'Address should be zero') +assert.bytesEquals(Bytes.fromUTF8('0x123'), Bytes.fromUTF8('0x123'), 'Bytes should be equal') +assert.i32Equals(2, 2, 'I32 should equal 2') +assert.bigIntEquals(BigInt.fromI32(1), BigInt.fromI32(1), 'BigInt should equal 1') +assert.booleanEquals(true, true, 'Boolean should be true') +assert.stringEquals('1', '1', 'String should equal 1') +assert.arrayEquals([ethereum.Value.fromI32(1)], [ethereum.Value.fromI32(1)], 'Arrays should be equal') +assert.tupleEquals( + changetype([ethereum.Value.fromI32(1)]), + changetype([ethereum.Value.fromI32(1)]), + 'Tuples should be equal', +) +assert.assertTrue(true, 'Should be true') +assert.assertNull(null, 'Should be null') +assert.assertNotNull('not null', 'Should be not null') +assert.entityCount('Gravatar', 1, 'There should be 2 gravatars') +assert.dataSourceCount('GraphTokenLockWallet', 1, 'GraphTokenLockWallet template should have one data source') +assert.dataSourceExists( + 'GraphTokenLockWallet', + Address.zero().toHexString(), + 'GraphTokenLockWallet should have a data source for zero address', +) +``` + +## Write a Unit Test + +Let's see how a simple unit test would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). 
+ +Assuming we have the following handler function (along with two helper functions to make our life easier): + +```typescript +export function handleNewGravatar(event: NewGravatar): void { + let gravatar = new Gravatar(event.params.id.toHex()) + gravatar.owner = event.params.owner + gravatar.displayName = event.params.displayName + gravatar.imageUrl = event.params.imageUrl + gravatar.save() +} + +export function handleNewGravatars(events: NewGravatar[]): void { + events.forEach((event) => { + handleNewGravatar(event) + }) +} + +export function createNewGravatarEvent( + id: i32, + ownerAddress: string, + displayName: string, + imageUrl: string, +): NewGravatar { + let mockEvent = newMockEvent() + let newGravatarEvent = new NewGravatar( + mockEvent.address, + mockEvent.logIndex, + mockEvent.transactionLogIndex, + mockEvent.logType, + mockEvent.block, + mockEvent.transaction, + mockEvent.parameters, + ) + newGravatarEvent.parameters = new Array() + let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) + let addressParam = new ethereum.EventParam( + 'ownerAddress', + ethereum.Value.fromAddress(Address.fromString(ownerAddress)), + ) + let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) + let imageUrlParam = new ethereum.EventParam('imageUrl', ethereum.Value.fromString(imageUrl)) + + newGravatarEvent.parameters.push(idParam) + newGravatarEvent.parameters.push(addressParam) + newGravatarEvent.parameters.push(displayNameParam) + newGravatarEvent.parameters.push(imageUrlParam) + + return newGravatarEvent +} +``` + +We first have to create a test file in our project. This is an example of how that might look like: + +```typescript +import { clearStore, test, assert } from 'matchstick-as/assembly/index' +import { Gravatar } from '../../generated/schema' +import { NewGravatar } from '../../generated/Gravity/Gravity' +import { createNewGravatarEvent, handleNewGravatars } from '../mappings/gravity' + +test('Can call mappings with custom events', () => { + // Create a test entity and save it in the store as initial state (optional) + let gravatar = new Gravatar('gravatarId0') + gravatar.save() + + // Create mock events + let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') + let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') + + // Call mapping functions passing the events we just created + handleNewGravatars([newGravatarEvent, anotherGravatarEvent]) + + // Assert the state of the store + assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') + assert.fieldEquals('Gravatar', '12345', 'owner', '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') + assert.fieldEquals('Gravatar', '3546', 'displayName', 'cap') + + // Clear the store in order to start the next test off on a clean slate + clearStore() +}) + +test('Next test', () => { + //... +}) +``` + +That's a lot to unpack! First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). `matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. 
The rest of it is pretty straightforward - here's what happens: + +- We're setting up our initial state and adding one custom Gravatar entity; +- We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function; +- We're calling out handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events; +- We assert the state of the store. How does that work? - We're passing a unique combination of Entity type and id. Then we check a specific field on that Entity and assert that it has the value we expect it to have. We're doing this both for the initial Gravatar Entity we added to the store, as well as the two Gravatar entities that gets added when the handler function is called; +- And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. We can define as many test blocks as we want. + +There we go - we've created our first test! 👏 + +Now in order to run our tests you simply need to run the following in your Subgraph root folder: + +`graph test Gravity` + +And if all goes well you should be greeted with the following: + +![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png) + +## Common test scenarios + +### Hydrating the store with a certain state + +Users are able to hydrate the store with a known set of entities. Here's an example to initialise the store with a Gravatar entity: + +```typescript +let gravatar = new Gravatar('entryId') +gravatar.save() +``` + +### Calling a mapping function with an event + +A user can create a custom event and pass it to a mapping function that is bound to the store: + +```typescript +import { store } from 'matchstick-as/assembly/store' +import { NewGravatar } from '../../generated/Gravity/Gravity' +import { handleNewGravatars, createNewGravatarEvent } from './mapping' + +let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') + +handleNewGravatar(newGravatarEvent) +``` + +### Calling all of the mappings with event fixtures + +Users can call the mappings with test fixtures. 
+
+```typescript
+import { NewGravatar } from '../../generated/Gravity/Gravity'
+import { store } from 'matchstick-as/assembly/store'
+import { handleNewGravatars, createNewGravatarEvent } from './mapping'
+
+let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac')
+
+let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac')
+
+handleNewGravatars([newGravatarEvent, anotherGravatarEvent])
+```
+
+For reference, `handleNewGravatars()` simply forwards each event to the single-event handler:
+
+```typescript
+export function handleNewGravatars(events: NewGravatar[]): void {
+  events.forEach(event => {
+    handleNewGravatar(event);
+  });
+}
+```
+
+### Mocking contract calls
+
+Users can mock contract calls:
+
+```typescript
+import { addMetadata, assert, createMockedFunction, clearStore, test } from 'matchstick-as/assembly/index'
+import { Gravity } from '../../generated/Gravity/Gravity'
+import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts'
+
+let contractAddress = Address.fromString('0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7')
+let expectedResult = Address.fromString('0x90cBa2Bbb19ecc291A12066Fd8329D65FA1f1947')
+let bigIntParam = BigInt.fromString('1234')
+createMockedFunction(contractAddress, 'gravatarToOwner', 'gravatarToOwner(uint256):(address)')
+  .withArgs([ethereum.Value.fromSignedBigInt(bigIntParam)])
+  .returns([ethereum.Value.fromAddress(Address.fromString('0x90cBa2Bbb19ecc291A12066Fd8329D65FA1f1947'))])
+
+let gravity = Gravity.bind(contractAddress)
+let result = gravity.gravatarToOwner(bigIntParam)
+
+assert.equals(ethereum.Value.fromAddress(expectedResult), ethereum.Value.fromAddress(result))
+```
+
+As demonstrated, in order to mock a contract call and hardcode a return value, the user must provide a contract address, function name, function signature, an array of arguments, and, of course, the return value.
+
+Users can also mock function reverts:
+
+```typescript
+let contractAddress = Address.fromString('0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7')
+createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(string,string)')
+  .withArgs([ethereum.Value.fromAddress(contractAddress)])
+  .reverts()
+```
+
+### Mocking IPFS files (from matchstick 0.4.1)
+
+Users can mock IPFS files by using the `mockIpfsFile(hash, filePath)` function. The function accepts two arguments: the first is the IPFS file hash/path and the second is the path to a local file. 
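+
+For instance, a single call is enough to route an IPFS lookup to a local fixture file. This is a minimal sketch, assuming the same hypothetical hash and fixture path as the fuller example below:
+
+```typescript
+import { mockIpfsFile } from 'matchstick-as/assembly/index'
+
+// Any ipfs.cat('ipfsCatfileHash') call in the code under test will now read tests/ipfs/cat.json
+mockIpfsFile('ipfsCatfileHash', 'tests/ipfs/cat.json')
+```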
+
+NOTE: When testing `ipfs.map/ipfs.mapJSON`, the callback function must be exported from the test file in order for Matchstick to detect it, like the `processGravatar()` function in the test example below:
+
+`.test.ts` file:
+
+```typescript
+import { assert, clearStore, test, mockIpfsFile } from 'matchstick-as/assembly/index'
+import { ipfs, Value } from '@graphprotocol/graph-ts'
+import { gravatarFromIpfs } from './utils'
+
+// Export ipfs.map() callback in order for Matchstick to detect it
+export { processGravatar } from './utils'
+
+// Entity type used in the assertions below
+const GRAVATAR_ENTITY_TYPE = 'Gravatar'
+
+test('ipfs.cat', () => {
+  mockIpfsFile('ipfsCatfileHash', 'tests/ipfs/cat.json')
+
+  assert.entityCount(GRAVATAR_ENTITY_TYPE, 0)
+
+  gravatarFromIpfs()
+
+  assert.entityCount(GRAVATAR_ENTITY_TYPE, 1)
+  assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '1', 'imageUrl', 'https://i.ytimg.com/vi/MELP46s8Cic/maxresdefault.jpg')
+
+  clearStore()
+})
+
+test('ipfs.map', () => {
+  mockIpfsFile('ipfsMapfileHash', 'tests/ipfs/map.json')
+
+  assert.entityCount(GRAVATAR_ENTITY_TYPE, 0)
+
+  ipfs.map('ipfsMapfileHash', 'processGravatar', Value.fromString('Gravatar'), ['json'])
+
+  assert.entityCount(GRAVATAR_ENTITY_TYPE, 3)
+  assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '1', 'displayName', 'Gravatar1')
+  assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '2', 'displayName', 'Gravatar2')
+  assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '3', 'displayName', 'Gravatar3')
+})
+```
+
+`utils.ts` file:
+
+```typescript
+import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts"
+import { Gravatar } from "../../generated/schema"
+
+...
+
+// ipfs.map callback
+export function processGravatar(value: JSONValue, userData: Value): void {
+  // See the JSONValue documentation for details on dealing
+  // with JSON values
+  let obj = value.toObject()
+  let id = obj.get('id')
+
+  if (!id) {
+    return
+  }
+
+  // Callbacks can also create entities
+  let gravatar = new Gravatar(id.toString())
+  gravatar.displayName = userData.toString() + id.toString()
+  gravatar.save()
+}
+
+// function that calls ipfs.cat
+export function gravatarFromIpfs(): void {
+  let rawData = ipfs.cat("ipfsCatfileHash")
+
+  if (!rawData) {
+    return
+  }
+
+  let jsonData = json.fromBytes(rawData as Bytes).toObject()
+
+  let id = jsonData.get('id')
+  let url = jsonData.get("imageUrl")
+
+  if (!id || !url) {
+    return
+  }
+
+  let gravatar = new Gravatar(id.toString())
+  gravatar.imageUrl = url.toString()
+  gravatar.save()
+}
+```
+
+### Asserting the state of the store
+
+Users are able to assert the final (or midway) state of the store through asserting entities. In order to do this, the user has to supply an Entity type, the specific ID of an Entity, the name of a field on that Entity, and the expected value of the field. Here's a quick example:
+
+```typescript
+import { assert } from 'matchstick-as/assembly/index'
+import { Gravatar } from '../generated/schema'
+
+let gravatar = new Gravatar('gravatarId0')
+gravatar.save()
+
+assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0')
+```
+
+Running the `assert.fieldEquals()` function will check the given field for equality against the given expected value. The test will fail and an error message will be printed if the values are **NOT** equal. Otherwise the test will pass successfully.
+
+### Interacting with Event metadata
+
+Users can use the default transaction metadata, which is returned as an `ethereum.Event` by the `newMockEvent()` function. 
The following example shows how you can read/write to those fields on the Event object:
+
+```typescript
+// Read
+let logType = newGravatarEvent.logType
+
+// Write
+let UPDATED_ADDRESS = '0xB16081F360e3847006dB660bae1c6d1b2e17eC2A'
+newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS)
+```
+
+### Asserting variable equality
+
+Two `ethereum.Value` objects can be compared for equality:
+
+```typescript
+assert.equals(ethereum.Value.fromString('hello'), ethereum.Value.fromString('hello'))
+```
+
+### Asserting that an Entity is **not** in the store
+
+Users can assert that an entity does not exist in the store. The function takes an entity type and an id. If the entity is in fact in the store, the test will fail with a relevant error message. Here's a quick example of how to use this functionality:
+
+```typescript
+assert.notInStore('Gravatar', '23')
+```
+
+### Printing the whole store, or single entities from it (for debug purposes)
+
+You can print the whole store to the console using this helper function:
+
+```typescript
+import { logStore } from 'matchstick-as/assembly/store'
+
+logStore()
+```
+
+As of version 0.6.0, `logStore` no longer prints derived fields; instead, users can use the new `logEntity` function. Of course, `logEntity` can be used to print any entity, not just ones that have derived fields. `logEntity` takes the entity type, the entity id and a `showRelated` flag to indicate whether users want to print the related derived entities.
+
+```typescript
+import { logEntity } from 'matchstick-as/assembly/store'
+
+logEntity('Gravatar', '23', true)
+```
+
+### Expected failure
+
+Users can have expected test failures, using the `shouldFail` flag on the `test()` function:
+
+```typescript
+test(
+  'Should throw an error',
+  () => {
+    throw new Error()
+  },
+  true,
+)
+```
+
+If the test is marked with `shouldFail = true` but DOES NOT fail, that will show up as an error in the logs and the test block will fail. Conversely, if a test that throws is marked with `shouldFail = false` (the default state), the test executor will crash.
+
+### Logging
+
+Having custom logs in the unit tests is exactly the same as logging in the mappings. The difference is that the log object needs to be imported from `matchstick-as` rather than `graph-ts`. Here's a simple example with all non-critical log types:
+
+```typescript
+import { test } from "matchstick-as/assembly/index";
+import { log } from "matchstick-as/assembly/log";
+
+test("Success", () => {
+  log.success("Success!", []);
+});
+test("Error", () => {
+  log.error("Error :( ", []);
+});
+test("Debug", () => {
+  log.debug("Debugging...", []);
+});
+test("Info", () => {
+  log.info("Info!", []);
+});
+test("Warning", () => {
+  log.warning("Warning!", []);
+});
+```
+
+Users can also simulate a critical failure, like so:
+
+```typescript
+test('Blow everything up', () => {
+  log.critical('Boom!', [])
+})
+```
+
+Logging critical errors will stop the execution of the tests and blow everything up. After all, we want to make sure your code doesn't have critical logs in deployment, and you should notice right away if that were to happen.
+
+### Testing derived fields
+
+Testing derived fields is a feature which allows users to set a field on a certain entity and have another entity be updated automatically if it derives one of its fields from the first entity. 
+
+Before version `0.6.0` it was possible to get the derived entities by accessing them as entity fields/properties, like so:
+
+```typescript
+let entity = ExampleEntity.load('id')
+let derivedEntity = entity.derived_entity
+```
+
+As of version `0.6.0`, this is done by using the `loadRelated` function of graph-node; the derived entities can be accessed the same way as in the handlers.
+
+```typescript
+test('Derived fields example test', () => {
+  let mainAccount = GraphAccount.load('12')!
+
+  assert.assertNull(mainAccount.get('nameSignalTransactions'))
+  assert.assertNull(mainAccount.get('operatorOf'))
+
+  let operatedAccount = GraphAccount.load('1')!
+  operatedAccount.operators = [mainAccount.id]
+  operatedAccount.save()
+
+  mockNameSignalTransaction('1234', mainAccount.id)
+  mockNameSignalTransaction('2', mainAccount.id)
+
+  mainAccount = GraphAccount.load('12')!
+
+  assert.assertNull(mainAccount.get('nameSignalTransactions'))
+  assert.assertNull(mainAccount.get('operatorOf'))
+
+  const nameSignalTransactions = mainAccount.nameSignalTransactions.load()
+  const operatorsOfMainAccount = mainAccount.operatorOf.load()
+
+  assert.i32Equals(2, nameSignalTransactions.length)
+  assert.i32Equals(1, operatorsOfMainAccount.length)
+
+  assert.stringEquals('1', operatorsOfMainAccount[0].id)
+
+  mockNameSignalTransaction('2345', mainAccount.id)
+
+  let nst = NameSignalTransaction.load('1234')!
+  nst.signer = '11'
+  nst.save()
+
+  store.remove('NameSignalTransaction', '2')
+
+  mainAccount = GraphAccount.load('12')!
+  assert.i32Equals(1, mainAccount.nameSignalTransactions.load().length)
+})
+```
+
+### Testing `loadInBlock`
+
+As of version `0.6.0`, users can test `loadInBlock` by using `mockInBlockStore`, which allows mocking entities in the block cache.
+
+```typescript
+import { afterAll, assert, beforeAll, clearInBlockStore, describe, mockInBlockStore, test } from 'matchstick-as'
+import { Gravatar } from '../../generated/schema'
+
+describe('loadInBlock', () => {
+  beforeAll(() => {
+    // Create the entity that will be placed in the block cache
+    let gravatar = new Gravatar('gravatarId0')
+    mockInBlockStore('Gravatar', 'gravatarId0', gravatar)
+  })
+
+  afterAll(() => {
+    clearInBlockStore()
+  })
+
+  test('Can use entity.loadInBlock() to retrieve entity from cache store in the current block', () => {
+    let retrievedGravatar = Gravatar.loadInBlock('gravatarId0')
+    assert.stringEquals('gravatarId0', retrievedGravatar!.get('id')!.toString())
+  })
+
+  test("Returns null when calling entity.loadInBlock() if an entity doesn't exist in the current block", () => {
+    let retrievedGravatar = Gravatar.loadInBlock('IDoNotExist')
+    assert.assertNull(retrievedGravatar)
+  })
+})
+```
+
+### Testing dynamic data sources
+
+Testing dynamic data sources can be done by mocking the return value of the `context()`, `address()` and `network()` functions of the `dataSource` namespace. These functions currently return the following: `context()` returns an empty entity (`DataSourceContext`), `address()` returns `0x0000000000000000000000000000000000000000`, and `network()` returns `mainnet`. The `create(...)` and `createWithContext(...)` functions are mocked to do nothing, so they don't need to be called in the tests at all. Changes to the return values can be done through the functions of the `dataSourceMock` namespace in `matchstick-as` (version 0.3.0+). 
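+
+In short, the flow is: override the mocked return values, run the handler under test, then restore the defaults. Here is a condensed sketch, using the same hypothetical address, network and context values as the full example that follows:
+
+```typescript
+import { dataSourceMock } from 'matchstick-as/assembly/index'
+import { DataSourceContext, Value } from '@graphprotocol/graph-ts'
+
+// Make dataSource.address(), dataSource.network() and dataSource.context() return custom values
+let context = new DataSourceContext()
+context.set('contextVal', Value.fromI32(325))
+dataSourceMock.setReturnValues('0xA16081F360e3847006dB660bae1c6d1b2e17eC2A', 'rinkeby', context)
+
+// ...call the handler under test here...
+
+// Restore the default return values for subsequent tests
+dataSourceMock.resetValues()
+```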
+
+Here is a full example. First, we have the following event handler (which has been intentionally repurposed to showcase data source mocking):
+
+```typescript
+export function handleApproveTokenDestinations(event: ApproveTokenDestinations): void {
+  let tokenLockWallet = TokenLockWallet.load(dataSource.address().toHexString())!
+  if (dataSource.network() == 'rinkeby') {
+    tokenLockWallet.tokenDestinationsApproved = true
+  }
+  let context = dataSource.context()
+  if (context.get('contextVal')!.toI32() > 0) {
+    tokenLockWallet.setBigInt('tokensReleased', BigInt.fromI32(context.get('contextVal')!.toI32()))
+  }
+  tokenLockWallet.save()
+}
+```
+
+And then we have the test using one of the methods in the `dataSourceMock` namespace to set a new return value for all of the `dataSource` functions:
+
+```typescript
+import { assert, test, newMockEvent, dataSourceMock } from 'matchstick-as/assembly/index'
+import { Address, BigInt, DataSourceContext, Value } from '@graphprotocol/graph-ts'
+
+import { handleApproveTokenDestinations } from '../../src/token-lock-wallet'
+import { ApproveTokenDestinations } from '../../generated/templates/GraphTokenLockWallet/GraphTokenLockWallet'
+import { TokenLockWallet } from '../../generated/schema'
+
+test('Data source simple mocking example', () => {
+  let addressString = '0xA16081F360e3847006dB660bae1c6d1b2e17eC2A'
+  let address = Address.fromString(addressString)
+
+  let wallet = new TokenLockWallet(address.toHexString())
+  wallet.save()
+  let context = new DataSourceContext()
+  context.set('contextVal', Value.fromI32(325))
+  dataSourceMock.setReturnValues(addressString, 'rinkeby', context)
+  let event = changetype<ApproveTokenDestinations>(newMockEvent())
+
+  assert.assertTrue(!wallet.tokenDestinationsApproved)
+
+  handleApproveTokenDestinations(event)
+
+  wallet = TokenLockWallet.load(address.toHexString())!
+  assert.assertTrue(wallet.tokenDestinationsApproved)
+  assert.bigIntEquals(wallet.tokensReleased, BigInt.fromI32(325))
+
+  dataSourceMock.resetValues()
+})
+```
+
+Notice that `dataSourceMock.resetValues()` is called at the end. That's because the mocked values persist once they are changed, so they need to be reset if you want to go back to the default values.
+
+### Testing dynamic data source creation
+
+As of version `0.6.0`, it is possible to test if a new data source has been created from a template. This feature supports both ethereum/contract and file/ipfs templates. 
There are four functions for this:
+
+- `assert.dataSourceCount(templateName, expectedCount)` can be used to assert the expected count of data sources from the specified template
+- `assert.dataSourceExists(templateName, address/ipfsHash)` asserts that a data source with the specified identifier (could be a contract address or IPFS file hash) from a specified template was created
+- `logDataSources(templateName)` prints all data sources from the specified template to the console for debugging purposes
+- `readFile(path)` reads a JSON file that represents an IPFS file and returns the content as Bytes
+
+#### Testing `ethereum/contract` templates
+
+```typescript
+test('ethereum/contract dataSource creation example', () => {
+  // Assert there are no dataSources created from GraphTokenLockWallet template
+  assert.dataSourceCount('GraphTokenLockWallet', 0)
+
+  // Create a new GraphTokenLockWallet datasource with address 0xA16081F360e3847006dB660bae1c6d1b2e17eC2A
+  GraphTokenLockWallet.create(Address.fromString('0xA16081F360e3847006dB660bae1c6d1b2e17eC2A'))
+
+  // Assert the dataSource has been created
+  assert.dataSourceCount('GraphTokenLockWallet', 1)
+
+  // Add a second dataSource with context
+  let context = new DataSourceContext()
+  context.set('contextVal', Value.fromI32(325))
+
+  GraphTokenLockWallet.createWithContext(Address.fromString('0xA16081F360e3847006dB660bae1c6d1b2e17eC2B'), context)
+
+  // Assert there are now 2 dataSources
+  assert.dataSourceCount('GraphTokenLockWallet', 2)
+
+  // Assert that a dataSource with address "0xA16081F360e3847006dB660bae1c6d1b2e17eC2B" was created
+  // Keep in mind that the `Address` type is transformed to lower case when decoded, so you have to pass the address in all lower case when asserting that it exists
+  assert.dataSourceExists('GraphTokenLockWallet', '0xA16081F360e3847006dB660bae1c6d1b2e17eC2B'.toLowerCase())
+
+  logDataSources('GraphTokenLockWallet')
+})
+```
+
+##### Example `logDataSources` output
+
+```bash
+🛠 {
+  "0xa16081f360e3847006db660bae1c6d1b2e17ec2a": {
+    "kind": "ethereum/contract",
+    "name": "GraphTokenLockWallet",
+    "address": "0xa16081f360e3847006db660bae1c6d1b2e17ec2a",
+    "context": null
+  },
+  "0xa16081f360e3847006db660bae1c6d1b2e17ec2b": {
+    "kind": "ethereum/contract",
+    "name": "GraphTokenLockWallet",
+    "address": "0xa16081f360e3847006db660bae1c6d1b2e17ec2b",
+    "context": {
+      "contextVal": {
+        "type": "Int",
+        "data": 325
+      }
+    }
+  }
+}
+```
+
+#### Testing `file/ipfs` templates
+
+Similarly to contract dynamic data sources, users can test file data sources and their handlers.
+
+##### Example `subgraph.yaml`
+
+```yaml
+...
+templates:
+  - kind: file/ipfs
+    name: GraphTokenLockMetadata
+    network: mainnet
+    mapping:
+      kind: ethereum/events
+      apiVersion: 0.0.6
+      language: wasm/assemblyscript
+      file: ./src/token-lock-wallet.ts
+      handler: handleMetadata
+      entities:
+        - TokenLockMetadata
+      abis:
+        - name: GraphTokenLockWallet
+          file: ./abis/GraphTokenLockWallet.json
+```
+
+##### Example `schema.graphql`
+
+```graphql
+"""
+Token Lock Wallets which hold locked GRT
+"""
+type TokenLockMetadata @entity {
+  "The address of the token lock wallet"
+  id: ID!
+  "Start time of the release schedule"
+  startTime: BigInt!
+  "End time of the release schedule"
+  endTime: BigInt!
+  "Number of periods between start time and end time"
+  periods: BigInt!
+  "Time when the releases start"
+  releaseStartTime: BigInt! 
+
+}
+```
+
+##### Example `metadata.json`
+
+```json
+{
+  "startTime": 1,
+  "endTime": 1,
+  "periods": 1,
+  "releaseStartTime": 1
+}
+```
+
+##### Example handler
+
+```typescript
+export function handleMetadata(content: Bytes): void {
+  // dataSource.stringParam() returns the File DataSource CID
+  // stringParam() will be mocked in the handler test
+  // for more info https://thegraph.com/docs/en/developing/creating-a-subgraph/#create-a-new-handler-to-process-files
+  let tokenMetadata = new TokenLockMetadata(dataSource.stringParam())
+  const value = json.fromBytes(content).toObject()
+
+  if (value) {
+    const startTime = value.get('startTime')
+    const endTime = value.get('endTime')
+    const periods = value.get('periods')
+    const releaseStartTime = value.get('releaseStartTime')
+
+    if (startTime && endTime && periods && releaseStartTime) {
+      tokenMetadata.startTime = startTime.toBigInt()
+      tokenMetadata.endTime = endTime.toBigInt()
+      tokenMetadata.periods = periods.toBigInt()
+      tokenMetadata.releaseStartTime = releaseStartTime.toBigInt()
+    }
+
+    tokenMetadata.save()
+  }
+}
+```
+
+##### Example test
+
+```typescript
+import { assert, test, dataSourceMock, readFile } from 'matchstick-as'
+import { Address, BigInt, Bytes, DataSourceContext, ipfs, json, store, Value } from '@graphprotocol/graph-ts'
+
+import { handleMetadata } from '../../src/token-lock-wallet'
+import { TokenLockMetadata } from '../../generated/schema'
+import { GraphTokenLockMetadata } from '../../generated/templates'
+
+test('file/ipfs dataSource creation example', () => {
+  // Generate the dataSource CID from the ipfsHash + the IPFS file path
+  // For example QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm/example.json
+  const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm'
+  const CID = `${ipfshash}/example.json`
+
+  // Create a new dataSource using the generated CID
+  GraphTokenLockMetadata.create(CID)
+
+  // Assert the dataSource has been created
+  assert.dataSourceCount('GraphTokenLockMetadata', 1)
+  assert.dataSourceExists('GraphTokenLockMetadata', CID)
+  logDataSources('GraphTokenLockMetadata')
+
+  // Now we have to mock the dataSource metadata and specifically dataSource.stringParam()
+  // dataSource.stringParam() actually uses the value of dataSource.address(), so we will mock the address using dataSourceMock from matchstick-as
+  // First we will reset the values and then use dataSourceMock.setAddress() to set the CID
+  dataSourceMock.resetValues()
+  dataSourceMock.setAddress(CID)
+
+  // Now we need to generate the Bytes to pass to the dataSource handler
+  // For this case we introduced a new function, readFile, that reads a local JSON file and returns the content as Bytes
+  const content = readFile(`path/to/metadata.json`)
+  handleMetadata(content)
+
+  // Now we will test if a TokenLockMetadata was created
+  const metadata = TokenLockMetadata.load(CID)
+
+  assert.bigIntEquals(metadata!.endTime, BigInt.fromI32(1))
+  assert.bigIntEquals(metadata!.periods, BigInt.fromI32(1))
+  assert.bigIntEquals(metadata!.releaseStartTime, BigInt.fromI32(1))
+  assert.bigIntEquals(metadata!.startTime, BigInt.fromI32(1))
+})
+```
+
+## Test Coverage
+
+Using **Matchstick**, Subgraph developers are able to run a script that will calculate the test coverage of the written unit tests.
+
+The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. 
Since code coverage (and testing as a whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead, we rely on the assertion that if a given handler has been called, the event/function for it has been properly mocked.
+
+### Prerequisites
+
+To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand:
+
+#### Export your handlers
+
+In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. So, for instance, in our example, our `gravity.test.ts` file has the following handler imported:
+
+```typescript
+import { handleNewGravatar } from '../../src/gravity'
+```
+
+In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this:
+
+```typescript
+export { handleNewGravatar }
+```
+
+### Usage
+
+Once that's all set up, to run the test coverage tool, simply run:
+
+```sh
+graph test -- -c
+```
+
+You could also add a custom `coverage` command to your `package.json` file, like so:
+
+```typescript
+ "scripts": {
+    /.../
+    "coverage": "graph test -- -c"
+  },
+```
+
+That will execute the coverage tool and you should see something like this in the terminal:
+
+```sh
+$ graph test -c
+Skipping download/install step because binary already exists at /Users/petko/work/demo-subgraph/node_modules/binary-install-raw/bin/0.4.0
+
+___  ___      _       _         _   _      _
+|  \/  |     | |     | |       | | (_)    | |
+| .  . | __ _| |_ ___| |__  ___| |_ _  ___| | __
+| |\/| |/ _` | __/ __| '_ \/ __| __| |/ __| |/ /
+| |  | | (_| | || (__| | | \__ \ |_| | (__|   <
+\_|  |_/\__,_|\__\___|_| |_|___/\__|_|\___|_|\_\
+
+Compiling...
+
+Running in coverage report mode.
+ ️
+Reading generated test modules... 🔎️
+
+Generating coverage report 📝
+
+Handlers for source 'Gravity':
+Handler 'handleNewGravatar' is tested.
+Handler 'handleUpdatedGravatar' is not tested.
+Handler 'handleCreateGravatar' is tested.
+Test coverage: 66.7% (2/3 handlers).
+
+Handlers for source 'GraphTokenLockWallet':
+Handler 'handleTokensReleased' is not tested.
+Handler 'handleTokensWithdrawn' is not tested.
+Handler 'handleTokensRevoked' is not tested.
+Handler 'handleManagerUpdated' is not tested.
+Handler 'handleApproveTokenDestinations' is not tested.
+Handler 'handleRevokeTokenDestinations' is not tested.
+Test coverage: 0.0% (0/6 handlers).
+
+Global test coverage: 22.2% (2/9 handlers).
+```
+
+### Test run time duration in the log output
+
+The log output includes the test run duration. Here's an example:
+
+`[Thu, 31 Mar 2022 13:54:54 +0300] Program executed in: 42.270ms.`
+
+## Common compiler errors
+
+> Critical: Could not create WasmInstance from valid module with context: unknown import: wasi_snapshot_preview1::fd_write has not been defined
+
+This means you have used `console.log` in your code, which is not supported by AssemblyScript. Please consider using the [Logging API](/subgraphs/developing/creating/graph-ts/api/#logging-api) instead.
+
+> ERROR TS2554: Expected ? arguments, but got ?.
+>
+> return new ethereum.Block(defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultAddress, defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt);
+>
+> in ~lib/matchstick-as/assembly/defaults.ts(18,12)
+>
+> ERROR TS2554: Expected ? arguments, but got ?. 
+> +> return new ethereum.Transaction(defaultAddressBytes, defaultBigInt, defaultAddress, defaultAddress, defaultBigInt, defaultBigInt, defaultBigInt, defaultAddressBytes, defaultBigInt); +> +> in ~lib/matchstick-as/assembly/defaults.ts(24,12) + +The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version. + +## Additional Resources + +For any additional support, check out this [demo Subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). + +## Feedback + +If you have any questions, feedback, feature requests or just want to reach out, the best place would be The Graph Discord where we have a dedicated channel for Matchstick, called 🔥| unit-testing. From ee97e486e8eab0f0be6f37875686fbde56957abc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:47 -0500 Subject: [PATCH 0206/1789] New translations transferring-a-subgraph.mdx (Romanian) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/ro/subgraphs/developing/managing/transferring-a-subgraph.mdx index 0fc6632cbc40..e80bde3fa6d2 100644 --- a/website/src/pages/ro/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/ro/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Reminders -- Whoever owns the NFT controls the subgraph. -- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that subgraph on the network. -- You can easily move control of a subgraph to a multi-sig. -- A community member can create a subgraph on behalf of a DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## View Your Subgraph as an NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Step-by-Step -To transfer ownership of a subgraph, do the following: +To transfer ownership of a Subgraph, do the following: 1. Use the UI built into Subgraph Studio: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) -2. Choose the address that you would like to transfer the subgraph to: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From 0439e4978e3c51620557318609af937f3b90ede3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:48 -0500 Subject: [PATCH 0207/1789] New translations transferring-a-subgraph.mdx (French) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/fr/subgraphs/developing/managing/transferring-a-subgraph.mdx index fe386614b198..2e046ac25087 100644 --- a/website/src/pages/fr/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/fr/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transfer d'un Subgraph --- -Les subgraphs publiés sur le réseau décentralisé possèdent un NFT minté à l'adresse qui a publié le subgraph. Le NFT est basé sur la norme ERC721, ce qui facilite les transferts entre comptes sur The Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Rappels -- Quiconque possède le NFT contrôle le subgraph. -- Si le propriétaire décide de vendre ou de transférer le NFT, il ne pourra plus éditer ou mettre à jour ce subgraph sur le réseau. -- Vous pouvez facilement déplacer le contrôle d'un subgraph vers un multi-sig. -- Un membre de la communauté peut créer un subgraph au nom d'une DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## Voir votre Subgraph en tant que NFT -Pour voir votre subgraph en tant que NFT, vous pouvez visiter une marketplace NFT telle que **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/adresse-de-votre-portefeuille ## Étape par Étape -Pour transférer la propriété d'un subgraph, procédez comme suit : +To transfer ownership of a Subgraph, do the following: 1. Utilisez l'interface utilisateur intégrée dans Subgraph Studio : ![Transfert de propriété de subgraph](/img/subgraph-ownership-transfer-1.png) -2. Choisissez l'adresse vers laquelle vous souhaitez transférer le subgraph : +2. 
Choose the address that you would like to transfer the Subgraph to: ![Transfert de propriété d'un subgraph](/img/subgraph-ownership-transfer-2.png) From aa015427f9674b84befa3aa66ab7e2ffceca045a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:49 -0500 Subject: [PATCH 0208/1789] New translations transferring-a-subgraph.mdx (Spanish) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/es/subgraphs/developing/managing/transferring-a-subgraph.mdx index 0fc6632cbc40..e80bde3fa6d2 100644 --- a/website/src/pages/es/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/es/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Reminders -- Whoever owns the NFT controls the subgraph. -- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that subgraph on the network. -- You can easily move control of a subgraph to a multi-sig. -- A community member can create a subgraph on behalf of a DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## View Your Subgraph as an NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Step-by-Step -To transfer ownership of a subgraph, do the following: +To transfer ownership of a Subgraph, do the following: 1. Use the UI built into Subgraph Studio: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) -2. Choose the address that you would like to transfer the subgraph to: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From 1c13f1adc7e7964431cc1451dfa3028eaf394472 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:50 -0500 Subject: [PATCH 0209/1789] New translations transferring-a-subgraph.mdx (Arabic) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/ar/subgraphs/developing/managing/transferring-a-subgraph.mdx index 0fc6632cbc40..e80bde3fa6d2 100644 --- a/website/src/pages/ar/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/ar/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Reminders -- Whoever owns the NFT controls the subgraph. -- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that subgraph on the network. -- You can easily move control of a subgraph to a multi-sig. -- A community member can create a subgraph on behalf of a DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## View Your Subgraph as an NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Step-by-Step -To transfer ownership of a subgraph, do the following: +To transfer ownership of a Subgraph, do the following: 1. Use the UI built into Subgraph Studio: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) -2. Choose the address that you would like to transfer the subgraph to: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From 5c6c75751828d3359a219c90dd2ec3577ce15567 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:51 -0500 Subject: [PATCH 0210/1789] New translations transferring-a-subgraph.mdx (Czech) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/cs/subgraphs/developing/managing/transferring-a-subgraph.mdx index 0fc6632cbc40..e80bde3fa6d2 100644 --- a/website/src/pages/cs/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/cs/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Reminders -- Whoever owns the NFT controls the subgraph. -- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that subgraph on the network. -- You can easily move control of a subgraph to a multi-sig. -- A community member can create a subgraph on behalf of a DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## View Your Subgraph as an NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Step-by-Step -To transfer ownership of a subgraph, do the following: +To transfer ownership of a Subgraph, do the following: 1. Use the UI built into Subgraph Studio: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) -2. Choose the address that you would like to transfer the subgraph to: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From 885b51217333f6cc9c037374ffd8cf069b31c6da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:52 -0500 Subject: [PATCH 0211/1789] New translations transferring-a-subgraph.mdx (German) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/de/subgraphs/developing/managing/transferring-a-subgraph.mdx index d6837fbade98..4be0429a799e 100644 --- a/website/src/pages/de/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/de/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Subgraphs, die im dezentralen Netzwerk veröffentlicht werden, haben eine NFT, die auf die Adresse geprägt wird, die den Subgraph veröffentlicht hat. Die NFT basiert auf dem Standard ERC721, der Überweisungen zwischen Konten im The Graph Network erleichtert. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Erinnerungshilfen -- Wer im Besitz der NFT ist, kontrolliert den Subgraph. -- Wenn der Eigentümer beschließt, das NFT zu verkaufen oder zu übertragen, kann er diesen Subgraph im Netz nicht mehr bearbeiten oder aktualisieren. -- Sie können die Kontrolle über einen Subgraph leicht an eine Multisig übertragen. -- Ein Community-Mitglied kann einen Subgraph im Namen einer DAO erstellen. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## Betrachten Sie Ihren Subgraph als NFT -Um Ihren Subgraph als NFT zu betrachten, können Sie einen NFT-Marktplatz wie **OpenSea** besuchen: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Schritt für Schritt -Um das Eigentum an einem Subgraph zu übertragen, gehen Sie wie folgt vor: +To transfer ownership of a Subgraph, do the following: 1. Verwenden Sie die in Subgraph Studio integrierte Benutzeroberfläche: ![Subgraph-Besitzübertragung](/img/subgraph-ownership-transfer-1.png) -2. Wählen Sie die Adresse, an die Sie den Subgraph übertragen möchten: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From abc0493dc680fc3dd018cd9b0cb82a12e0b12ca0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:53 -0500 Subject: [PATCH 0212/1789] New translations transferring-a-subgraph.mdx (Italian) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/it/subgraphs/developing/managing/transferring-a-subgraph.mdx index 0fc6632cbc40..e80bde3fa6d2 100644 --- a/website/src/pages/it/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/it/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Reminders -- Whoever owns the NFT controls the subgraph. -- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that subgraph on the network. -- You can easily move control of a subgraph to a multi-sig. -- A community member can create a subgraph on behalf of a DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## View Your Subgraph as an NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Step-by-Step -To transfer ownership of a subgraph, do the following: +To transfer ownership of a Subgraph, do the following: 1. Use the UI built into Subgraph Studio: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) -2. Choose the address that you would like to transfer the subgraph to: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From 231044fe371ec564ab22276f7bf05930ec4819b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:54 -0500 Subject: [PATCH 0213/1789] New translations transferring-a-subgraph.mdx (Japanese) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/ja/subgraphs/developing/managing/transferring-a-subgraph.mdx index 0fc6632cbc40..e80bde3fa6d2 100644 --- a/website/src/pages/ja/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/ja/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Reminders -- Whoever owns the NFT controls the subgraph. -- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that subgraph on the network. -- You can easily move control of a subgraph to a multi-sig. -- A community member can create a subgraph on behalf of a DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## View Your Subgraph as an NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Step-by-Step -To transfer ownership of a subgraph, do the following: +To transfer ownership of a Subgraph, do the following: 1. Use the UI built into Subgraph Studio: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) -2. Choose the address that you would like to transfer the subgraph to: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From 8b4fe59ae4a5f1474ed3dcc2e563e7cd50aafafa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:55 -0500 Subject: [PATCH 0214/1789] New translations transferring-a-subgraph.mdx (Korean) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/ko/subgraphs/developing/managing/transferring-a-subgraph.mdx index 0fc6632cbc40..e80bde3fa6d2 100644 --- a/website/src/pages/ko/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/ko/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Reminders -- Whoever owns the NFT controls the subgraph. -- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that subgraph on the network. -- You can easily move control of a subgraph to a multi-sig. -- A community member can create a subgraph on behalf of a DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## View Your Subgraph as an NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Step-by-Step -To transfer ownership of a subgraph, do the following: +To transfer ownership of a Subgraph, do the following: 1. Use the UI built into Subgraph Studio: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) -2. Choose the address that you would like to transfer the subgraph to: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From d37d29b4a9e32f7b3af3875a94f49e99dda7ae8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:56 -0500 Subject: [PATCH 0215/1789] New translations transferring-a-subgraph.mdx (Dutch) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/nl/subgraphs/developing/managing/transferring-a-subgraph.mdx index 0fc6632cbc40..e80bde3fa6d2 100644 --- a/website/src/pages/nl/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/nl/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Reminders -- Whoever owns the NFT controls the subgraph. -- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that subgraph on the network. -- You can easily move control of a subgraph to a multi-sig. -- A community member can create a subgraph on behalf of a DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## View Your Subgraph as an NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Step-by-Step -To transfer ownership of a subgraph, do the following: +To transfer ownership of a Subgraph, do the following: 1. Use the UI built into Subgraph Studio: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) -2. Choose the address that you would like to transfer the subgraph to: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From 6ce64ae0203e2eadcf6a50a77f28ec4f311069e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:57 -0500 Subject: [PATCH 0216/1789] New translations transferring-a-subgraph.mdx (Polish) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/pl/subgraphs/developing/managing/transferring-a-subgraph.mdx index 0fc6632cbc40..e80bde3fa6d2 100644 --- a/website/src/pages/pl/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/pl/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Reminders -- Whoever owns the NFT controls the subgraph. -- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that subgraph on the network. -- You can easily move control of a subgraph to a multi-sig. -- A community member can create a subgraph on behalf of a DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## View Your Subgraph as an NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Step-by-Step -To transfer ownership of a subgraph, do the following: +To transfer ownership of a Subgraph, do the following: 1. Use the UI built into Subgraph Studio: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) -2. Choose the address that you would like to transfer the subgraph to: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From 02d7e6f24ef5cc2b45748e9fb99cbf1dd001c2c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:58 -0500 Subject: [PATCH 0217/1789] New translations transferring-a-subgraph.mdx (Portuguese) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/pt/subgraphs/developing/managing/transferring-a-subgraph.mdx index 1931370a6df7..4de632fc490d 100644 --- a/website/src/pages/pt/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/pt/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Subgraphs publicados na rede descentralizada terão um NFT mintado no endereço que publicou o subgraph. O NFT é baseado no padrão ERC-721, que facilita transferências entre contas na Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Reminders -- O dono do NFT controla o subgraph. -- Se o dono atual decidir vender ou transferir o NFT, ele não poderá mais editar ou atualizar aquele subgraph na rede. -- É possível transferir o controle de um subgraph para uma multisig. -- Um membro da comunidade pode criar um subgraph no nome de uma DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## View Your Subgraph as an NFT -Para visualizar o seu subgraph como um NFT, visite um mercado de NFTs como o **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Passo a Passo -Para transferir a titularidade de um subgraph, faça o seguinte: +To transfer ownership of a Subgraph, do the following: 1. Use a interface embutida no Subgraph Studio: ![Transferência de Titularidade de Subgraph](/img/subgraph-ownership-transfer-1.png) -2. Escolha o endereço para o qual gostaria de transferir o subgraph: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From cbee11edf2c3557b2943c82eda946a0befc6d3a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:11:59 -0500 Subject: [PATCH 0218/1789] New translations transferring-a-subgraph.mdx (Russian) --- .../managing/transferring-a-subgraph.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/ru/subgraphs/developing/managing/transferring-a-subgraph.mdx index bc76890218f7..f99757ea07e9 100644 --- a/website/src/pages/ru/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/ru/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Субграфы, опубликованные в децентрализованной сети, имеют NFT, сминченный по адресу, опубликовавшему субграф. NFT основан на стандарте ERC721, который облегчает переводы между аккаунтами в The Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Напоминания -- Тот, кто владеет NFT, управляет субграфом. -- Если владелец решит продать или передать NFT, он больше не сможет редактировать или обновлять этот субграф в сети. -- Вы можете легко перенести управление субграфом на мультиподпись. -- Участник сообщества может создать субграф от имени DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## Просмотр Вашего субграфа как NFT -Чтобы просмотреть свой субграф как NFT, Вы можете посетить маркетплейс NFT, например, **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Пошаговое руководство -Чтобы передать право собственности на субграф, выполните следующие действия: +To transfer ownership of a Subgraph, do the following: 1. Используйте встроенный в Subgraph Studio пользовательский интерфейс: - ![Передача права собственности на субграф](/img/subgraph-ownership-transfer-1.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) -2. Выберите адрес, на который хотели бы передать субграф: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From d7385380ab3c5baa701144da897a9fdb2814cc11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:00 -0500 Subject: [PATCH 0219/1789] New translations transferring-a-subgraph.mdx (Swedish) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/sv/subgraphs/developing/managing/transferring-a-subgraph.mdx index 0fc6632cbc40..e80bde3fa6d2 100644 --- a/website/src/pages/sv/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/sv/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Reminders -- Whoever owns the NFT controls the subgraph. -- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that subgraph on the network. -- You can easily move control of a subgraph to a multi-sig. -- A community member can create a subgraph on behalf of a DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## View Your Subgraph as an NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Step-by-Step -To transfer ownership of a subgraph, do the following: +To transfer ownership of a Subgraph, do the following: 1. Use the UI built into Subgraph Studio: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) -2. Choose the address that you would like to transfer the subgraph to: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From 44c6d91ef6f746e0f4c3da5fb79580327bdd9630 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:01 -0500 Subject: [PATCH 0220/1789] New translations transferring-a-subgraph.mdx (Turkish) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/tr/subgraphs/developing/managing/transferring-a-subgraph.mdx index 3631cc8a2973..0707d7d2ab5a 100644 --- a/website/src/pages/tr/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/tr/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Merkeziyetsiz ağda yayımlanan subgraph’ler, NFT olarak oluşturulup subgraph’i yayımlayan adrese gönderilir. Bu NFT, The Graph Ağı’ndaki hesaplar arasında transferi kolaylaştıran standart bir ERC721 sözleşmesini temel alır. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Reminders -- NFT’ye sahip olan kişi, subgraph’in kontrolünü elinde tutar. -- NFT’nin sahibi NFT’yi satmaya veya transfer etmeye karar verirse, artık bu subgraph’i ağ üzerinde düzenleyemez veya güncelleyemez. -- Subgraph kontrolünü kolayca bir multi-sig cüzdana taşıyabilirsiniz. -- Bir topluluk üyesi, bir DAO adına subgraph oluşturabilir. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## Subgraph’inizi NFT Olarak Görüntüleyin -Subgraph’inizi bir NFT olarak görüntülemek için, **OpenSea** gibi bir NFT pazar yerini ziyaret edebilirsiniz: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Adım Adım -Bir subgraph’in sahipliğini transfer etmek için şu adımları izleyin: +To transfer ownership of a Subgraph, do the following: 1. Subgraph Studio’ya entegre edilmiş kullanıcı arayüzünü kullanın: ![Subgraph Sahipliği Transferi](/img/subgraph-ownership-transfer-1.png) -2. Subgraph’i transfer etmek istediğiniz adresi seçin: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From 1ee2cc013d16eee8c512a0b84dd951bbdf7357b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:02 -0500 Subject: [PATCH 0221/1789] New translations transferring-a-subgraph.mdx (Ukrainian) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/uk/subgraphs/developing/managing/transferring-a-subgraph.mdx index 0fc6632cbc40..e80bde3fa6d2 100644 --- a/website/src/pages/uk/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/uk/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Reminders -- Whoever owns the NFT controls the subgraph. -- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that subgraph on the network. -- You can easily move control of a subgraph to a multi-sig. -- A community member can create a subgraph on behalf of a DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## View Your Subgraph as an NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Step-by-Step -To transfer ownership of a subgraph, do the following: +To transfer ownership of a Subgraph, do the following: 1. Use the UI built into Subgraph Studio: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) -2. Choose the address that you would like to transfer the subgraph to: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From 79f7e4d6ff505a65e9c162be9f191f7f076b5502 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:03 -0500 Subject: [PATCH 0222/1789] New translations transferring-a-subgraph.mdx (Chinese Simplified) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/zh/subgraphs/developing/managing/transferring-a-subgraph.mdx index 0fc6632cbc40..e80bde3fa6d2 100644 --- a/website/src/pages/zh/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/zh/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Reminders -- Whoever owns the NFT controls the subgraph. -- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that subgraph on the network. -- You can easily move control of a subgraph to a multi-sig. -- A community member can create a subgraph on behalf of a DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## View Your Subgraph as an NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Step-by-Step -To transfer ownership of a subgraph, do the following: +To transfer ownership of a Subgraph, do the following: 1. Use the UI built into Subgraph Studio: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) -2. Choose the address that you would like to transfer the subgraph to: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From 891e57735058656b1dccb310b961161c55ca3b72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:04 -0500 Subject: [PATCH 0223/1789] New translations transferring-a-subgraph.mdx (Urdu (Pakistan)) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/ur/subgraphs/developing/managing/transferring-a-subgraph.mdx index 0fc6632cbc40..e80bde3fa6d2 100644 --- a/website/src/pages/ur/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/ur/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Reminders -- Whoever owns the NFT controls the subgraph. -- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that subgraph on the network. -- You can easily move control of a subgraph to a multi-sig. -- A community member can create a subgraph on behalf of a DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## View Your Subgraph as an NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Step-by-Step -To transfer ownership of a subgraph, do the following: +To transfer ownership of a Subgraph, do the following: 1. Use the UI built into Subgraph Studio: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) -2. Choose the address that you would like to transfer the subgraph to: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From 8c247895bb93a1336790c30bcf05066f5b0f24c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:05 -0500 Subject: [PATCH 0224/1789] New translations transferring-a-subgraph.mdx (Vietnamese) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/vi/subgraphs/developing/managing/transferring-a-subgraph.mdx index 0fc6632cbc40..e80bde3fa6d2 100644 --- a/website/src/pages/vi/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/vi/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Reminders -- Whoever owns the NFT controls the subgraph. -- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that subgraph on the network. -- You can easily move control of a subgraph to a multi-sig. -- A community member can create a subgraph on behalf of a DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## View Your Subgraph as an NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Step-by-Step -To transfer ownership of a subgraph, do the following: +To transfer ownership of a Subgraph, do the following: 1. Use the UI built into Subgraph Studio: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) -2. Choose the address that you would like to transfer the subgraph to: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From 75ee05296f792e0c5e9d0ad4fe5b929692266d4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:06 -0500 Subject: [PATCH 0225/1789] New translations transferring-a-subgraph.mdx (Marathi) --- .../managing/transferring-a-subgraph.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/mr/subgraphs/developing/managing/transferring-a-subgraph.mdx index 0fc6632cbc40..e80bde3fa6d2 100644 --- a/website/src/pages/mr/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/mr/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. ## Reminders -- Whoever owns the NFT controls the subgraph. -- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that subgraph on the network. -- You can easily move control of a subgraph to a multi-sig. -- A community member can create a subgraph on behalf of a DAO. +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## View Your Subgraph as an NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## Step-by-Step -To transfer ownership of a subgraph, do the following: +To transfer ownership of a Subgraph, do the following: 1. Use the UI built into Subgraph Studio: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) -2. Choose the address that you would like to transfer the subgraph to: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From 555e8702c28090cc34e86f232a3b561ebd7c8754 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:07 -0500 Subject: [PATCH 0226/1789] New translations transferring-a-subgraph.mdx (Hindi) --- .../managing/transferring-a-subgraph.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/hi/subgraphs/developing/managing/transferring-a-subgraph.mdx index 1b71f96fd6e8..5e1517d2c1c0 100644 --- a/website/src/pages/hi/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/hi/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -2,18 +2,18 @@ title: Transferring a Subgraph --- -विभिन्न नेटवर्क पर प्रकाशित subgraphs के लिए उस पते पर एक NFT जारी किया गया है जिसने subgraph प्रकाशित किया। NFT एक मानक ERC721 पर आधारित है, जो The Graph नेटवर्क पर खातों के बीच स्थानांतरण की सुविधा देता है। +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. -## अनुस्मारक +## अनुस्मारक -- जो भी 'NFT' का मालिक है, वह subgraph को नियंत्रित करता है। -- यदि मालिक 'NFT' को बेचने या स्थानांतरित करने का निर्णय लेता है, तो वे नेटवर्क पर उस subgraph को संपादित या अपडेट नहीं कर पाएंगे। -- आप आसानी से एक subgraph का नियंत्रण एक multi-sig में स्थानांतरित कर सकते हैं। -- एक समुदाय का सदस्य DAO की ओर से एक subgraph बना सकता है। +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. ## अपने 'subgraph' को एक NFT के रूप में देखें -अपने 'subgraph' को एक NFT के रूप में देखने के लिए, आप एक NFT मार्केटप्लेस जैसे OpenSea पर जा सकते हैं: +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: ``` https://opensea.io/your-wallet-address @@ -27,13 +27,13 @@ https://rainbow.me/your-wallet-addres ## चरण-दर-चरण -एक Subgraph का स्वामित्व स्थानांतरित करने के लिए, निम्नलिखित करें: +To transfer ownership of a Subgraph, do the following: 1. 'Subgraph Studio' में निर्मित UI का उपयोग करें: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) -2. उस पते का चयन करें जिसे आप 'subgraph' को स्थानांतरित करना चाहेंगे: +2. 
Choose the address that you would like to transfer the Subgraph to: ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) From 3747135a7fc802e4f03526f9b1d3d936f94dce2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:08 -0500 Subject: [PATCH 0227/1789] New translations transferring-a-subgraph.mdx (Swahili) --- .../managing/transferring-a-subgraph.mdx | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/developing/managing/transferring-a-subgraph.mdx diff --git a/website/src/pages/sw/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/sw/subgraphs/developing/managing/transferring-a-subgraph.mdx new file mode 100644 index 000000000000..e80bde3fa6d2 --- /dev/null +++ b/website/src/pages/sw/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -0,0 +1,42 @@ +--- +title: Transferring a Subgraph +--- + +Subgraphs published to the decentralized network have an NFT minted to the address that published the Subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. + +## Reminders + +- Whoever owns the NFT controls the Subgraph. +- If the owner decides to sell or transfer the NFT, they will no longer be able to edit or update that Subgraph on the network. +- You can easily move control of a Subgraph to a multi-sig. +- A community member can create a Subgraph on behalf of a DAO. + +## View Your Subgraph as an NFT + +To view your Subgraph as an NFT, you can visit an NFT marketplace like **OpenSea**: + +``` +https://opensea.io/your-wallet-address +``` + +Or a wallet explorer like **Rainbow.me**: + +``` +https://rainbow.me/your-wallet-addres +``` + +## Step-by-Step + +To transfer ownership of a Subgraph, do the following: + +1. Use the UI built into Subgraph Studio: + + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) + +2. Choose the address that you would like to transfer the Subgraph to: + + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) + +Optionally, you can also use the built-in UI of NFT marketplaces like OpenSea: + +![Subgraph Ownership Transfer from NFT marketplace](/img/subgraph-ownership-transfer-nft-marketplace.png) From cbb24d1f481f9c7bf261a06ad8618c83f4494458 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:09 -0500 Subject: [PATCH 0228/1789] New translations graphql-api.mdx (Romanian) --- .../ro/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/ro/subgraphs/querying/graphql-api.mdx b/website/src/pages/ro/subgraphs/querying/graphql-api.mdx index b3003ece651a..b82afcfa252c 100644 --- a/website/src/pages/ro/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/ro/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## What is GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). 
+To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries with GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. -This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). ```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### Fulltext Search Queries -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Fulltext search operators: -| Symbol | Operator | Description | -| --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | +| Symbol | Operator | Description | +| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `Follow by` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) 
| #### Examples @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### Subgraph Metadata -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. 
@@ -427,6 +427,6 @@ If a block is provided, the metadata is as of that block, if not the latest inde - hash: the hash of the block - number: the block number -- timestamp: the timestamp of the block, if available (this is currently only available for subgraphs indexing EVM networks) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 31f02dd81133530bfb3ca78629b4f8a43c2208a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:10 -0500 Subject: [PATCH 0229/1789] New translations graphql-api.mdx (French) --- .../fr/subgraphs/querying/graphql-api.mdx | 142 +++++++++--------- 1 file changed, 71 insertions(+), 71 deletions(-) diff --git a/website/src/pages/fr/subgraphs/querying/graphql-api.mdx b/website/src/pages/fr/subgraphs/querying/graphql-api.mdx index 204fae24a5a5..c6dfd2a7bc9d 100644 --- a/website/src/pages/fr/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/fr/subgraphs/querying/graphql-api.mdx @@ -2,23 +2,23 @@ title: API GraphQL --- -Learn about the GraphQL Query API used in The Graph. +Découvrez l'API de requête GraphQL utilisée dans The Graph. ## Qu'est-ce que GraphQL ? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). -## Queries with GraphQL +## Requêtes avec GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. -> Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. +> Note : `query` n'a pas besoin d'être inclus au début de la requête `graphql` lors de l'utilisation de The Graph. ### Exemples -Query for a single `Token` entity defined in your schema: +Requête pour une seule entité `Token` définie dans votre schéma : ```graphql { @@ -29,9 +29,9 @@ Query for a single `Token` entity defined in your schema: } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. +> Note : Lors de l'interrogation d'une seule entité, le champ `id` est obligatoire et doit être écrit sous forme de chaîne de caractères. -Query all `Token` entities: +Interroge toutes les entités `Token` : ```graphql { @@ -44,10 +44,10 @@ Query all `Token` entities: ### Tri -When querying a collection, you may: +Lors de l'interrogation d'une collection, vous pouvez : -- Use the `orderBy` parameter to sort by a specific attribute. 
-- Use the `orderDirection` to specify the sort direction, `asc` for ascending or `desc` for descending. +- Utilisez le paramètre `orderBy` pour trier les données en fonction d'un attribut spécifique. +- Utilisez `orderDirection` pour spécifier la direction du tri, `asc` pour ascendant ou `desc` pour descendant. #### Exemple @@ -62,9 +62,9 @@ When querying a collection, you may: #### Exemple de tri d'entités imbriquées -As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. +Depuis Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0), les entités peuvent être triées sur la base des entités imbriquées. -The following example shows tokens sorted by the name of their owner: +L'exemple suivant montre des jetons triés par le nom de leur propriétaire : ```graphql { @@ -79,18 +79,18 @@ The following example shows tokens sorted by the name of their owner: } ``` -> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. +> Actuellement, vous pouvez trier par type `String` ou `ID` à "un" niveau de profondeur sur les champs `@entity` et `@derivedFrom`. Malheureusement, le [tri par interfaces sur des entités d'un niveau de profondeur] (https://github.com/graphprotocol/graph-node/pull/4058), le tri par champs qui sont des tableaux et des entités imbriquées n'est pas encore prit en charge. ### Pagination -When querying a collection, it's best to: +Lors de l'interrogation d'une collection, il est préférable de : -- Use the `first` parameter to paginate from the beginning of the collection. - - The default sort order is by `ID` in ascending alphanumeric order, **not** by creation time. -- Use the `skip` parameter to skip entities and paginate. For instance, `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. -- Avoid using `skip` values in queries because they generally perform poorly. To retrieve a large number of items, it's best to page through entities based on an attribute as shown in the previous example above. +- Utilisez le paramètre `first` pour paginer à partir du début de la collection. + - L'ordre de tri par défaut est le tri par `ID` dans l'ordre alphanumérique croissant, **non** par heure de création. +- Utilisez le paramètre `skip` pour sauter des entités et paginer. Par exemple, `first:100` affiche les 100 premières entités et `first:100, skip:100` affiche les 100 entités suivantes. +- Évitez d'utiliser les valeurs `skip` dans les requêtes car elles sont généralement peu performantes. Pour récupérer un grand nombre d'éléments, il est préférable de parcourir les entités en fonction d'un attribut, comme indiqué dans l'exemple précédent. -#### Example using `first` +#### Exemple d'utilisation de `first` Interroger les 10 premiers tokens : @@ -103,11 +103,11 @@ Interroger les 10 premiers tokens : } ``` -To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. 
+Pour rechercher des groupes d'entités au milieu d'une collection, le paramètre `skip` peut être utilisé en conjonction avec le paramètre `first` pour sauter un nombre spécifié d'entités en commençant par le début de la collection. -#### Example using `first` and `skip` +#### Exemple utilisant `first` et `skip` -Query 10 `Token` entities, offset by 10 places from the beginning of the collection: +Interroger 10 entités `Token`, décalées de 10 places par rapport au début de la collection : ```graphql { @@ -118,9 +118,9 @@ Query 10 `Token` entities, offset by 10 places from the beginning of the collect } ``` -#### Example using `first` and `id_ge` +#### Exemple utilisant `first` et `id_ge` -If a client needs to retrieve a large number of entities, it's more performant to base queries on an attribute and filter by that attribute. For example, a client could retrieve a large number of tokens using this query: +Si un client a besoin de récupérer un grand nombre d'entités, il est plus performant de baser les requêtes sur un attribut et de filtrer par cet attribut. Par exemple, un client pourrait récupérer un grand nombre de jetons en utilisant cette requête : ```graphql query manyTokens($lastID: String) { @@ -131,16 +131,16 @@ query manyTokens($lastID: String) { } ``` -The first time, it would send the query with `lastID = ""`, and for subsequent requests it would set `lastID` to the `id` attribute of the last entity in the previous request. This approach will perform significantly better than using increasing `skip` values. +La première fois, il enverra la requête avec `lastID = ""`, et pour les requêtes suivantes, il fixera `lastID` à l'attribut `id` de la dernière entité dans la requête précédente. Cette approche est nettement plus performante que l'utilisation de valeurs `skip` croissantes. ### Filtration -- You can use the `where` parameter in your queries to filter for different properties. -- You can filter on multiple values within the `where` parameter. +- Vous pouvez utiliser le paramètre `where` dans vos requêtes pour filtrer les différentes propriétés. +- Vous pouvez filtrer sur plusieurs valeurs dans le paramètre `where`. -#### Example using `where` +#### Exemple d'utilisation de `where` -Query challenges with `failed` outcome: +Défis de la requête avec un résultat `failed` : ```graphql { @@ -154,7 +154,7 @@ Query challenges with `failed` outcome: } ``` -You can use suffixes like `_gt`, `_lte` for value comparison: +Vous pouvez utiliser des suffixes comme `_gt`, `_lte` pour comparer les valeurs : #### Exemple de filtrage de plage @@ -170,9 +170,9 @@ You can use suffixes like `_gt`, `_lte` for value comparison: #### Exemple de filtrage par bloc -You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. +Vous pouvez également filtrer les entités qui ont été mises à jour dans ou après un bloc spécifié avec `_change_block(number_gte : Int)`. -Cela peut être utile si vous cherchez à récupérer uniquement les entités qui ont changé, par exemple depuis la dernière fois que vous avez interrogé. Ou bien, il peut être utile d'étudier ou de déboguer la façon dont les entités changent dans votre subgraph (si combiné avec un filtre de bloc, vous pouvez isoler uniquement les entités qui ont changé dans un bloc spécifique). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. 
Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). ```graphql { @@ -186,7 +186,7 @@ Cela peut être utile si vous cherchez à récupérer uniquement les entités qu #### Exemple de filtrage d'entités imbriquées -Filtering on the basis of nested entities is possible in the fields with the `_` suffix. +Le filtrage sur la base d'entités imbriquées est possible dans les champs avec le suffixe `_`. Cela peut être utile si vous souhaitez récupérer uniquement les entités dont les entités au niveau enfant remplissent les conditions fournies. @@ -204,11 +204,11 @@ Cela peut être utile si vous souhaitez récupérer uniquement les entités dont #### Opérateurs logiques -As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. +Depuis Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0), vous pouvez regrouper plusieurs paramètres dans le même argument `where` en utilisant les opérateurs `and` ou `or` pour filtrer les résultats en fonction de plusieurs critères. -##### `AND` Operator +##### L'opérateur `AND` -The following example filters for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. +L'exemple suivant filtre les défis avec `outcome` `succeeded` et `number` supérieur ou égal à `100`. ```graphql { @@ -222,7 +222,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num } ``` -> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. +> **Sucre syntaxique:** Vous pouvez simplifier la requête ci-dessus en supprimant l'opérateur \`and\`\` et en passant une sous-expression séparée par des virgules. > > ```graphql > { @@ -236,9 +236,9 @@ The following example filters for challenges with `outcome` `succeeded` and `num > } > ``` -##### `OR` Operator +##### L'opérateur `OR` -The following example filters for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. +L'exemple suivant filtre les défis avec `outcome` `succeeded` ou `number` supérieur ou égal à `100`. ```graphql { @@ -252,7 +252,7 @@ The following example filters for challenges with `outcome` `succeeded` or `numb } ``` -> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. +> **Note** : Lors de l'élaboration des requêtes, il est important de prendre en compte l'impact sur les performances de l'utilisation de l'opérateur `or`. Si `or` peut être un outil utile pour élargir les résultats d'une recherche, il peut aussi avoir des coûts importants. L'un des principaux problèmes de l'opérateur `or` est qu'il peut ralentir les requêtes. 
En effet, `or` oblige la base de données à parcourir plusieurs index, ce qui peut prendre beaucoup de temps. Pour éviter ces problèmes, il est recommandé aux développeurs d'utiliser les opérateurs and au lieu de or chaque fois que cela est possible. Cela permet un filtrage plus précis et peut conduire à des requêtes plus rapides et plus précises. #### Tous les filtres @@ -281,9 +281,9 @@ _not_ends_with _not_ends_with_nocase ``` -> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. +> Veuillez noter que certains suffixes ne sont supportés que pour des types spécifiques. Par exemple, `Boolean` ne supporte que `_not`, `_in`, et `_not_in`, mais `_` n'est disponible que pour les types objet et interface. -In addition, the following global filters are available as part of `where` argument: +En outre, les filtres globaux suivants sont disponibles en tant que partie de l'argument `where` : ```graphql _change_block(numéro_gte : Int) @@ -291,11 +291,11 @@ _change_block(numéro_gte : Int) ### Interrogation des états précédents -You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. +Vous pouvez interroger l'état de vos entités non seulement pour le dernier bloc, ce qui est le cas par défaut, mais aussi pour un bloc arbitraire dans le passé. Le bloc auquel une requête doit se produire peut être spécifié soit par son numéro de bloc, soit par son hash de bloc, en incluant un argument `block` dans les champs de niveau supérieur des requêtes. -The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to **not** be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. +Le résultat d'une telle requête ne changera pas au fil du temps, c'est-à-dire qu'une requête portant sur un certain bloc passé renverra le même résultat quel que soit le moment où elle est exécutée, à l'exception d'une requête portant sur un bloc très proche de la tête de la chaîne, dont le résultat pourrait changer s'il s'avérait que ce bloc ne figurait **pas** sur la chaîne principale et que la chaîne était réorganisée. Une fois qu'un bloc peut être considéré comme définitif, le résultat de la requête ne changera pas. -> Note: The current implementation is still subject to certain limitations that might violate these guarantees. The implementation can not always tell that a given block hash is not on the main chain at all, or if a query result by a block hash for a block that is not yet considered final could be influenced by a block reorganization running concurrently with the query. They do not affect the results of queries by block hash when the block is final and known to be on the main chain. [This issue](https://github.com/graphprotocol/graph-node/issues/1405) explains what these limitations are in detail. +> Remarque : l'implémentation actuelle est encore sujette à certaines limitations qui pourraient violer ces garanties. 
L'implémentation ne permet pas toujours de déterminer si un bloc donné n'est pas du tout sur la chaîne principale ou si le résultat d'une requête par bloc pour un bloc qui n'est pas encore considéré comme final peut être influencé par une réorganisation du bloc qui a lieu en même temps que la requête. Elles n'affectent pas les résultats des requêtes par hash de bloc lorsque le bloc est final et que l'on sait qu'il se trouve sur la chaîne principale. [Ce numéro](https://github.com/graphprotocol/graph-node/issues/1405) explique ces limitations en détail. #### Exemple @@ -311,7 +311,7 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. +Cette requête renverra les entités `Challenge` et les entités `Application` qui leur sont associées, telles qu'elles existaient directement après le traitement du bloc numéro 8 000 000. #### Exemple @@ -327,26 +327,26 @@ This query will return `Challenge` entities, and their associated `Application` } ``` -This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. +Cette requête renverra les entités `Challenge`, et leurs entités `Application` associées, telles qu'elles existaient directement après le traitement du bloc avec le hash donné. ### Requêtes de recherche en texte intégral -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. -Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. +Les requêtes de recherche en texte intégral comportent un champ obligatoire, `text`, pour fournir les termes de la recherche. Plusieurs opérateurs spéciaux de texte intégral peuvent être utilisés dans ce champ de recherche `text`. Opérateurs de recherche en texte intégral : -| Symbole | Opérateur | Description | -| --- | --- | --- | -| `&` | `And` | Pour combiner plusieurs termes de recherche dans un filtre pour les entités incluant tous les termes fournis | -| | | `Or` | Les requêtes comportant plusieurs termes de recherche séparés par l'opérateur ou renverront toutes les entités correspondant à l'un des termes fournis | -| `<->` | `Follow by` | Spécifiez la distance entre deux mots. | -| `:*` | `Prefix` | Utilisez le terme de recherche de préfixe pour trouver les mots dont le préfixe correspond (2 caractères requis.) 
| +| Symbole | Opérateur | Description | +| ------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | Pour combiner plusieurs termes de recherche dans un filtre pour les entités incluant tous les termes fournis | +| | | `Or` | Les requêtes comportant plusieurs termes de recherche séparés par l'opérateur ou renverront toutes les entités correspondant à l'un des termes fournis | +| `<->` | `Follow by` | Spécifiez la distance entre deux mots. | +| `:*` | `Prefix` | Utilisez le terme de recherche de préfixe pour trouver les mots dont le préfixe correspond (2 caractères requis.) | #### Exemples -Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields. +En utilisant l'opérateur `ou`, cette requête filtrera les entités de blog ayant des variations d' "anarchism" ou "crumpet" dans leurs champs de texte intégral. ```graphql { @@ -359,7 +359,7 @@ Using the `or` operator, this query will filter to blog entities with variations } ``` -The `follow by` operator specifies a words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy" +L'opérateur `follow by` spécifie un mot à une distance spécifique dans les documents en texte intégral. La requête suivante renverra tous les blogs contenant des variations de "decentralize" suivies de "philosophy" ```graphql { @@ -387,25 +387,25 @@ Combinez des opérateurs de texte intégral pour créer des filtres plus complex ### Validation -Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more. +Graph Node met en œuvre une validation [basée sur les spécifications](https://spec.graphql.org/October2021/#sec-Validation) des requêtes GraphQL qu'il reçoit à l'aide de [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), qui est basée sur l'implémentation de référence [graphql-js](https://github.com/graphql/graphql-js/tree/main/src/validation). Les requêtes qui échouent à une règle de validation sont accompagnées d'une erreur standard - consultez les [spécifications GraphQL](https://spec.graphql.org/October2021/#sec-Validation) pour en savoir plus. ## Schema -The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +Le schéma de vos sources de données, c'est-à-dire les types d'entités, les valeurs et les relations qui peuvent être interrogés, est défini dans le [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. 
The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). -> Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. +> Remarque : notre API n'expose pas les mutations car les développeurs sont censés émettre des transactions directement contre la blockchain sous-jacente à partir de leurs applications. ### Entities -All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. +Tous les types GraphQL avec des directives `@entity` dans votre schéma seront traités comme des entités et doivent avoir un champ `ID`. -> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. +> **Note:** Actuellement, tous les types de votre schéma doivent avoir une directive `@entity`. Dans le futur, nous traiterons les types n'ayant pas la directive `@entity` comme des objets de valeur, mais cela n'est pas encore pris en charge. ### Métadonnées du Subgraph -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -421,14 +421,14 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -Si un bloc est fourni, les métadonnées sont celles de ce bloc, sinon le dernier bloc indexé est utilisé. S'il est fourni, le bloc doit être postérieur au bloc de départ du subgraph et inférieur ou égal au bloc indexé le plus récent. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. -`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. +`deployment` est un ID unique, correspondant au IPFS CID du fichier `subgraph.yaml`. 
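For reference, a minimal sketch of the `_meta` lookup described in the hunk above. The block number is purely illustrative, and the selected fields simply mirror the `deployment`, `block` (number, hash, timestamp) and `hasIndexingErrors` fields that the surrounding lines describe; this query is a sketch, not part of the patched files:

```graphql
{
  # metadata as of an illustrative block number
  _meta(block: { number: 123987 }) {
    deployment
    hasIndexingErrors
    block {
      number
      hash
      timestamp
    }
  }
}
```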
-`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): +`block` fournit des informations sur le dernier bloc (en tenant compte des contraintes de bloc passées à `_meta`) : - hash : le hash du bloc - number: the block number -- timestamp : l'horodatage du bloc, si disponible (ceci n'est actuellement disponible que pour les subgraphs indexant les réseaux EVM) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 132fc8d4c5eb053bb8d39b49439b81b688484ec3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:12 -0500 Subject: [PATCH 0230/1789] New translations graphql-api.mdx (Spanish) --- .../es/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/es/subgraphs/querying/graphql-api.mdx b/website/src/pages/es/subgraphs/querying/graphql-api.mdx index 018abd046e72..726d7e84884d 100644 --- a/website/src/pages/es/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/es/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## What is GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries with GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. -Esto puede ser útil si buscas obtener solo las entidades que han cambiado, por ejemplo, desde la última vez que realizaste una encuesta. O, alternativamente, puede ser útil para investigar o depurar cómo cambian las entidades en tu subgrafo (si se combina con un filtro de bloque, puedes aislar solo las entidades que cambiaron en un bloque específico). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). 
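As a hedged sketch of the `_change_block(number_gte: Int)` filter documented just above: the `applications` collection is borrowed from the `Application` entity named elsewhere in these files, the block number is arbitrary, and only the mandatory `id` field is selected.

```graphql
{
  # only entities updated in or after the given block
  applications(where: { _change_block: { number_gte: 8000000 } }) {
    id
  }
}
```

Combined with a block constraint, the same filter isolates entities that changed in one specific block, as the surrounding text notes.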
```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### Consultas de Búsqueda de Texto Completo -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Operadores de búsqueda de texto completo: -| Símbolo | Operador | Descripción | -| --- | --- | --- | -| `&` | `And` | Para combinar varios términos de búsqueda en un filtro para entidades que incluyen todos los términos proporcionados | -| | | `Or` | Las consultas con varios términos de búsqueda separados por o el operador devolverá todas las entidades que coincidan con cualquiera de los términos proporcionados | -| `<->` | `Follow by` | Especifica la distancia entre dos palabras. | -| `:*` | `Prefix` | Utilice el término de búsqueda del prefijo para encontrar palabras cuyo prefijo coincida (se requieren 2 caracteres.) | +| Símbolo | Operador | Descripción | +| ------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `&` | `And` | Para combinar varios términos de búsqueda en un filtro para entidades que incluyen todos los términos proporcionados | +| | | `Or` | Las consultas con varios términos de búsqueda separados por o el operador devolverá todas las entidades que coincidan con cualquiera de los términos proporcionados | +| `<->` | `Follow by` | Especifica la distancia entre dos palabras. | +| `:*` | `Prefix` | Utilice el término de búsqueda del prefijo para encontrar palabras cuyo prefijo coincida (se requieren 2 caracteres.) | #### Ejemplos @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. 
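To make the schema-driven generation mentioned above concrete, a small sketch of an entity type definition follows. The `Token` type and its `owner` field are hypothetical; the `@entity` directive and the required `ID` field follow the rules restated in these hunks.

```graphql
# hypothetical entity type; every entity must carry an ID field
type Token @entity {
  id: ID!
  owner: Bytes!
}
```

From a definition like this, Graph Node derives the root `Query` fields automatically, and no mutations are exposed, as the note above says.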
@@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### Metadatos del subgrafo -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -Si se proporciona un bloque, los metadatos corresponden a ese bloque; de lo contrario, se utiliza el bloque indexado más reciente. Si es proporcionado, el bloque debe ser posterior al bloque de inicio del subgrafo y menor o igual que el bloque indexado más reciente. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. @@ -427,6 +427,6 @@ Si se proporciona un bloque, los metadatos corresponden a ese bloque; de lo cont - hash: el hash del bloque - número: el número de bloque -- timestamp: la marca de tiempo del bloque, en caso de estar disponible (actualmente solo está disponible para subgrafos que indexan redes EVM) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 44677d1882c9585d1995065305e4844773469514 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:12 -0500 Subject: [PATCH 0231/1789] New translations graphql-api.mdx (Arabic) --- .../ar/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/ar/subgraphs/querying/graphql-api.mdx b/website/src/pages/ar/subgraphs/querying/graphql-api.mdx index d73381f88a7d..801e95fa66de 100644 --- a/website/src/pages/ar/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/ar/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## What is GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries with GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. 
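As a sketch of the `entity` and `entities` fields generated for each `Entity` type (the `token`/`tokens` names and the id value are assumptions, not taken from the patched files):

```graphql
{
  # singular form: one entity looked up by its required id
  token(id: "0x1") {
    id
  }
  # plural form: a collection of the same entity type
  tokens(first: 5) {
    id
  }
}
```

The `query` keyword is omitted at the top level here, matching the note that follows.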
> Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. -This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). ```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### Fulltext Search Queries -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Fulltext search operators: -| رمز | عامل التشغيل | الوصف | -| --- | --- | --- | -| `&` | `And` | لدمج عبارات بحث متعددة في فلتر للكيانات التي تتضمن جميع العبارات المتوفرة | -| | | `Or` | الاستعلامات التي تحتوي على عبارات بحث متعددة مفصولة بواسطة عامل التشغيل or ستعيد جميع الكيانات المتطابقة من أي عبارة متوفرة | -| `<->` | `Follow by` | يحدد المسافة بين كلمتين. | -| `:*` | `Prefix` | يستخدم عبارة البحث prefix للعثور على الكلمات التي تتطابق بادئتها (مطلوب حرفان.) | +| رمز | عامل التشغيل | الوصف | +| ------ | ------------ | --------------------------------------------------------------------------------------------------------------------------- | +| `&` | `And` | لدمج عبارات بحث متعددة في فلتر للكيانات التي تتضمن جميع العبارات المتوفرة | +| | | `Or` | الاستعلامات التي تحتوي على عبارات بحث متعددة مفصولة بواسطة عامل التشغيل or ستعيد جميع الكيانات المتطابقة من أي عبارة متوفرة | +| `<->` | `Follow by` | يحدد المسافة بين كلمتين. | +| `:*` | `Prefix` | يستخدم عبارة البحث prefix للعثور على الكلمات التي تتطابق بادئتها (مطلوب حرفان.) | #### Examples @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). 
+GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### Subgraph Metadata -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. @@ -427,6 +427,6 @@ If a block is provided, the metadata is as of that block, if not the latest inde - hash: the hash of the block - number: the block number -- timestamp: the timestamp of the block, if available (this is currently only available for subgraphs indexing EVM networks) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From babc9e9f5c4d9a8d534f652b630c5ee8ac097fb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:14 -0500 Subject: [PATCH 0232/1789] New translations graphql-api.mdx (Czech) --- .../cs/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/cs/subgraphs/querying/graphql-api.mdx b/website/src/pages/cs/subgraphs/querying/graphql-api.mdx index f0cc9b78b338..1a5e672ccbd5 100644 --- a/website/src/pages/cs/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/cs/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## What is GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). 
+To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries with GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. -To může být užitečné, pokud chcete načíst pouze entity, které se změnily například od posledního dotazování. Nebo může být užitečná pro zkoumání nebo ladění změn entit v podgrafu (v kombinaci s blokovým filtrem můžete izolovat pouze entity, které se změnily v určitém bloku). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). ```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### Fulltextové Vyhledávání dotazy -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Operátory fulltextového vyhledávání: -| Symbol | Operátor | Popis | -| --- | --- | --- | -| `&` | `And` | Pro kombinaci více vyhledávacích výrazů do filtru pro entity, které obsahují všechny zadané výrazy | -| | | `Or` | Dotazy s více hledanými výrazy oddělenými operátorem nebo vrátí všechny entity, které odpovídají některému z uvedených výrazů | -| `<->` | `Follow by` | Zadejte vzdálenost mezi dvěma slovy. | -| `:*` | `Prefix` | Pomocí předponového výrazu vyhledejte slova, jejichž předpona se shoduje (vyžadovány 2 znaky) | +| Symbol | Operátor | Popis | +| ------ | ----------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `&` | `And` | Pro kombinaci více vyhledávacích výrazů do filtru pro entity, které obsahují všechny zadané výrazy | +| | | `Or` | Dotazy s více hledanými výrazy oddělenými operátorem nebo vrátí všechny entity, které odpovídají některému z uvedených výrazů | +| `<->` | `Follow by` | Zadejte vzdálenost mezi dvěma slovy. 
| +| `:*` | `Prefix` | Pomocí předponového výrazu vyhledejte slova, jejichž předpona se shoduje (vyžadovány 2 znaky) | #### Příklady @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### Metadata podgrafů -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -Pokud je uveden blok, metadata se vztahují k tomuto bloku, pokud ne, použije se poslední indexovaný blok. Pokud je blok uveden, musí se nacházet za počátečním blokem podgrafu a musí být menší nebo roven poslednímu Indevovaný bloku. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. 
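A hedged sketch of a fulltext search query using the required `text` field and the or-operator tabled in this hunk. The `blogSearch` field name and the selected fields are assumptions standing in for whatever fulltext field a schema actually defines; the search terms echo the "anarchism" and "crumpet" wording used in the examples.

```graphql
{
  # "|" is the or-operator listed in the operator table
  blogSearch(text: "anarchism | crumpet") {
    id
    title
  }
}
```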
@@ -427,6 +427,6 @@ Pokud je uveden blok, metadata se vztahují k tomuto bloku, pokud ne, použije s - hash: hash bloku - číslo: číslo bloku -- timestamp: časové razítko bloku, pokud je k dispozici (v současné době je k dispozici pouze pro podgrafy indexující sítě EVM) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 9a8894bfd687b96560706eeaf95fb551b6e7fc38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:15 -0500 Subject: [PATCH 0233/1789] New translations graphql-api.mdx (German) --- .../de/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/de/subgraphs/querying/graphql-api.mdx b/website/src/pages/de/subgraphs/querying/graphql-api.mdx index e6636e20a53e..fed99334111e 100644 --- a/website/src/pages/de/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/de/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## What is GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries with GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. -This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). 
```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### Fulltext Search Queries -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Fulltext search operators: -| Symbol | Operator | Beschreibung | -| --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | +| Symbol | Operator | Beschreibung | +| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `Follow by` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | #### Beispiele @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### Subgraph Metadata -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. 
This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. @@ -427,6 +427,6 @@ If a block is provided, the metadata is as of that block, if not the latest inde - hash: the hash of the block - number: the block number -- timestamp: the timestamp of the block, if available (this is currently only available for subgraphs indexing EVM networks) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 63eef2a5e954566030a0c85ac4cbd218a3e36c46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:16 -0500 Subject: [PATCH 0234/1789] New translations graphql-api.mdx (Italian) --- .../it/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/it/subgraphs/querying/graphql-api.mdx b/website/src/pages/it/subgraphs/querying/graphql-api.mdx index 45100b8f6d68..29547f648dea 100644 --- a/website/src/pages/it/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/it/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## What is GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries with GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. 
-Questo può essere utile se si vuole recuperare solo le entità che sono cambiate, ad esempio dall'ultima volta che è stato effettuato il polling. In alternativa, può essere utile per indagare o fare il debug di come le entità stanno cambiando nel subgraph (se combinato con un filtro di blocco, è possibile isolare solo le entità che sono cambiate in un blocco specifico). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). ```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### Query di ricerca fulltext -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Operatori di ricerca fulltext: -| Simbolo | Operatore | Descrizione | -| --- | --- | --- | -| `&` | `And` | Per combinare più termini di ricerca in un filtro per le entità che includono tutti i termini forniti | -| | | `Or` | Le query con più termini di ricerca separati dall'operatore Or restituiranno tutte le entità con una corrispondenza tra i termini forniti | -| `<->` | `Follow by` | Specifica la distanza tra due parole. | -| `:*` | `Prefix` | Utilizzare il termine di ricerca del prefisso per trovare le parole il cui prefisso corrisponde (sono richiesti 2 caratteri.) | +| Simbolo | Operatore | Descrizione | +| ------- | ----------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| `&` | `And` | Per combinare più termini di ricerca in un filtro per le entità che includono tutti i termini forniti | +| | | `Or` | Le query con più termini di ricerca separati dall'operatore Or restituiranno tutte le entità con una corrispondenza tra i termini forniti | +| `<->` | `Follow by` | Specifica la distanza tra due parole. | +| `:*` | `Prefix` | Utilizzare il termine di ricerca del prefisso per trovare le parole il cui prefisso corrisponde (sono richiesti 2 caratteri.) | #### Esempi @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). 
+GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### Metadati del Subgraph -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -Se viene fornito un blocco, i metadati si riferiscono a quel blocco, altrimenti viene utilizzato il blocco indicizzato più recente. Se fornito, il blocco deve essere successivo al blocco iniziale del subgraph e inferiore o uguale al blocco indicizzato più recente. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. @@ -427,6 +427,6 @@ Se viene fornito un blocco, i metadati si riferiscono a quel blocco, altrimenti - hash: l'hash del blocco - numero: il numero del blocco -- timestamp: il timestamp del blocco, se disponibile (attualmente è disponibile solo per i subgraph che indicizzano le reti EVM) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 3a1476e2481e54af59d31109a9e6daf9adbe92d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:17 -0500 Subject: [PATCH 0235/1789] New translations graphql-api.mdx (Japanese) --- .../ja/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/ja/subgraphs/querying/graphql-api.mdx b/website/src/pages/ja/subgraphs/querying/graphql-api.mdx index e6fa6e325eea..24324d70ac5e 100644 --- a/website/src/pages/ja/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/ja/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## What is GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). 
+To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries with GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. -これは、前回のポーリング以降など、変更されたエンティティのみをフェッチする場合に役立ちます。または、サブグラフでエンティティがどのように変化しているかを調査またはデバッグするのに役立ちます (ブロック フィルターと組み合わせると、特定のブロックで変更されたエンティティのみを分離できます)。 +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). ```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### 全文検索クエリ -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. 全文検索演算子: -| シンボル | オペレーター | 説明書き | -| --- | --- | --- | -| `&` | `And` | 複数の検索語を組み合わせて、指定したすべての検索語を含むエンティティをフィルタリングします。 | -| | | `Or` | 複数の検索語をオペレーターで区切って検索すると、指定した語のいずれかにマッチするすべてのエンティティが返されます。 | -| `<->` | `Follow by` | 2 つの単語の間の距離を指定します。 | -| `:*` | `Prefix` | プレフィックス検索語を使って、プレフィックスが一致する単語を検索します(2 文字必要) | +| シンボル | オペレーター | 説明書き | +| ------ | ----------- | --------------------------------------------------------- | +| `&` | `And` | 複数の検索語を組み合わせて、指定したすべての検索語を含むエンティティをフィルタリングします。 | +| | | `Or` | 複数の検索語をオペレーターで区切って検索すると、指定した語のいずれかにマッチするすべてのエンティティが返されます。 | +| `<->` | `Follow by` | 2 つの単語の間の距離を指定します。 | +| `:*` | `Prefix` | プレフィックス検索語を使って、プレフィックスが一致する単語を検索します(2 文字必要) | #### 例 @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). 
+GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### サブグラフ メタデータ -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -ブロックが提供されている場合、メタデータはそのブロックのものであり、そうでない場合は、最新のインデックス付きブロックが使用されます。提供される場合、ブロックはサブグラフの開始ブロックの後にあり、最後にインデックス付けされたブロック以下でなければなりません。 +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. @@ -427,6 +427,6 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s - hash: ブロックのハッシュ - number: ブロック番号 -- timestamp: 可能であれば、ブロックのタイムスタンプ (これは現在、EVMネットワークのインデックスを作成するサブグラフでのみ利用可能) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 83807fd3f38e265795cd5365e1f5852f61ad0a4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:18 -0500 Subject: [PATCH 0236/1789] New translations graphql-api.mdx (Korean) --- .../ko/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/ko/subgraphs/querying/graphql-api.mdx b/website/src/pages/ko/subgraphs/querying/graphql-api.mdx index b3003ece651a..b82afcfa252c 100644 --- a/website/src/pages/ko/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/ko/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## What is GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries with GraphQL -In your subgraph schema you define types called `Entities`. 
For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. -This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). ```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### Fulltext Search Queries -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Fulltext search operators: -| Symbol | Operator | Description | -| --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | +| Symbol | Operator | Description | +| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `Follow by` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | #### Examples @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. 
the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### Subgraph Metadata -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. @@ -427,6 +427,6 @@ If a block is provided, the metadata is as of that block, if not the latest inde - hash: the hash of the block - number: the block number -- timestamp: the timestamp of the block, if available (this is currently only available for subgraphs indexing EVM networks) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 7e26abbc76735af8e45ff534c93fa30e6185dc69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:19 -0500 Subject: [PATCH 0237/1789] New translations graphql-api.mdx (Dutch) --- .../nl/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/nl/subgraphs/querying/graphql-api.mdx b/website/src/pages/nl/subgraphs/querying/graphql-api.mdx index b3003ece651a..b82afcfa252c 100644 --- a/website/src/pages/nl/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/nl/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## What is GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. 
The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries with GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. -This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). ```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### Fulltext Search Queries -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Fulltext search operators: -| Symbol | Operator | Description | -| --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) 
| +| Symbol | Operator | Description | +| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `Follow by` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | #### Examples @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### Subgraph Metadata -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. 
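The hunk above carries the documentation for the auto-generated `_Meta_` object and, earlier in the same file, the `_change_block(number_gte: Int)` filter. As a hedged illustration of the polling pattern those two sections describe, the sketch below reads the latest indexed block from `_meta` while fetching only entities changed since a previously recorded block; the `tokens` collection, its `id` field, and the block number `1234567` are hypothetical placeholders, not content from the patched files.

```graphql
{
  # Latest indexed block, deployment ID, and error flag from the auto-generated _Meta_ object.
  _meta {
    block {
      number # remember this value and use it as number_gte on the next poll
    }
    deployment
    hasIndexingErrors
  }
  # Hypothetical entity collection: only entities changed at or after block 1234567 are returned.
  tokens(where: { _change_block: { number_gte: 1234567 } }) {
    id
  }
}
```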
@@ -427,6 +427,6 @@ If a block is provided, the metadata is as of that block, if not the latest inde - hash: the hash of the block - number: the block number -- timestamp: the timestamp of the block, if available (this is currently only available for subgraphs indexing EVM networks) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 4be36f4db8396af21469c1441e3b4ca548e98bb9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:20 -0500 Subject: [PATCH 0238/1789] New translations graphql-api.mdx (Polish) --- .../pl/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/pl/subgraphs/querying/graphql-api.mdx b/website/src/pages/pl/subgraphs/querying/graphql-api.mdx index b3003ece651a..b82afcfa252c 100644 --- a/website/src/pages/pl/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/pl/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## What is GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries with GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. -This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). 
```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### Fulltext Search Queries -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Fulltext search operators: -| Symbol | Operator | Description | -| --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | +| Symbol | Operator | Description | +| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `Follow by` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | #### Examples @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### Subgraph Metadata -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. 
This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. @@ -427,6 +427,6 @@ If a block is provided, the metadata is as of that block, if not the latest inde - hash: the hash of the block - number: the block number -- timestamp: the timestamp of the block, if available (this is currently only available for subgraphs indexing EVM networks) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From f1f8d4dfa94a43f7791dc16874b8c6c863d66004 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:21 -0500 Subject: [PATCH 0239/1789] New translations graphql-api.mdx (Portuguese) --- .../pt/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/pt/subgraphs/querying/graphql-api.mdx b/website/src/pages/pt/subgraphs/querying/graphql-api.mdx index 2adfb642d2e7..fc80da6f7173 100644 --- a/website/src/pages/pt/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/pt/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Aprenda sobre a API de Queries da GraphQL, usada no The Graph. ## O Que é a GraphQL? -A [GraphQL](https://graphql.org/learn/) é uma linguagem de queries para APIs e um sistema de tempo de execução (runtime) para executar esses queries, com os seus dados já existentes. O The Graph usa a GraphQL para fazer queries em subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -Para entender melhor o papel maior da GraphQL, veja [como desenvolver](/subgraphs/developing/introduction/) e [criar um subgraph](/developing/creating-a-subgraph/). +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries com a GraphQL -No seu schema de subgraph, você definirá tipos chamados `Entities` ("Entidades"). Para cada tipo `Entity`, campos `entity` e `entities` serão gerados no tipo de nível superior `Query`. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Observação: `query` não precisa ser incluído no topo do query `graphql` enquanto usar o The Graph. 
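The hunk above ends with the note that the `query` keyword can be omitted at the top of a request, and the same file explains that singular `entity` and plural `entities` fields are generated for every entity type. A minimal sketch of both points follows; the `Token` entity and its `owner` field are hypothetical placeholders rather than anything defined in these patches.

```graphql
{
  # Singular auto-generated field: look up one hypothetical Token entity by id,
  # with no leading `query` keyword.
  token(id: "1") {
    id
    owner
  }
}
```

The plural field generated alongside it (`tokens`) would return the collection in the same way.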
@@ -170,7 +170,7 @@ Faça um query sobre desafios com o resultado `failed` (falha): Também dá para filtrar entidades atualizadas dentro de, ou depois de, um bloco específico com `_change_block(number_gte: Int)`. -Isto pode servir caso mire retirar apenas entidades que mudaram, por exemplo, desde a última vez que você pesquisou. Também pode ser bom investigar ou debugar como as entidades mudam no seu subgraph (se combinado com um filtro de blocos, pode isolar apenas entidades que mudaram em um bloco específico). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). ```graphql { @@ -329,18 +329,18 @@ Este query retornará entidades `Challenge` e as suas entidades `Application` as ### Consultas de Busca Fulltext -Campos de busca em full-text fornecem uma API de busca de texto expressiva, que pode ser adicionada e personalizada ao schema do subgraph. Para adicionar buscas em full-text ao seu subgraph, veja [Como Definir Campos de Busca em Full-Text](/developing/creating-a-subgraph/#defining-fulltext-search-fields). +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Buscas em full-text têm um campo obrigatório, `text`, para ofertar termos de busca. Vários operadores especiais de full-text estão disponíveis para uso neste campo de busca `text`. Operadores de busca fulltext: -| Símbolo | Operador | Descrição | -| --- | --- | --- | -| `&` | `And` | Para combinar múltiplos termos de busca num filtro para entidades que incluem todos os termos fornecidos | -| | | `Or` | Consultas com vários termos de busca separados pelo operador or retornarão todas as entidades com uma correspondência de qualquer termo providenciado | -| `<->` | `Follow by` | Especifica a distância entre duas palavras. | -| `:*` | `Prefix` | Use o prefixo para encontrar palavras que correspondem a tal prefixo (2 caracteres necessários.) | +| Símbolo | Operador | Descrição | +| ------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | Para combinar múltiplos termos de busca num filtro para entidades que incluem todos os termos fornecidos | +| | | `Or` | Consultas com vários termos de busca separados pelo operador or retornarão todas as entidades com uma correspondência de qualquer termo providenciado | +| `<->` | `Follow by` | Especifica a distância entre duas palavras. | +| `:*` | `Prefix` | Use o prefixo para encontrar palavras que correspondem a tal prefixo (2 caracteres necessários.) | #### Exemplos @@ -391,7 +391,7 @@ O Graph Node implementa validações [baseadas em especificação](https://spec. O schema dos seus dataSources — por exemplo, os tipos de entidade, valores, e conexões que podem ser solicitados num query — é definido através da [Linguagem de Definição de Interface da GraphQL (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -Os schemas GraphQL geralmente definem tipos de origem para `queries` (solicitações), `subscriptions` (inscrições) e `mutations` (mutações). 
O The Graph só apoia `queries`. A origem `Query` para o seu subgraph é gerada automaticamente a partir do schema GraphQL incluído no [manifest do seu subgraph](/developing/creating-a-subgraph/#components-of-a-subgraph). +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Nota: A nossa API não expõe mutações, porque esperamos que os programadores emitam transações diretamente dos seus aplicativos perante a blockchain subjacente. @@ -403,7 +403,7 @@ Todos os tipos GraphQL com diretivos `@entity` no seu schema serão tratados com ### Metadados de Subgraph -Todos os subgraphs devem ter um objeto `_Meta_` gerado automaticamente, que permite acesso aos metadados do subgraph. Isto pode ser solicitado num query como o query mostrado a seguir: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ Todos os subgraphs devem ter um objeto `_Meta_` gerado automaticamente, que perm } ``` -Se um bloco for fornecido, os metadados são desde aquele bloco; e se não, é usado o último indexado. Se providenciado, o bloco deve ser após o inicial do subgraph, e menor ou igual ao bloco indexado mais recentemente. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` é uma ID única, correspondente ao CID PIFS do arquivo `subgraph.yaml`. @@ -427,6 +427,6 @@ O `block` fornece informações sobre o bloco mais recente (em consideração a - hash: o hash do bloco - number: o número do bloco -- timestamp: a hora do bloco, se disponível (disponível atualmente apenas para subgraphs que indexam redes EVM) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` é um boolean que identifica se o subgraph encontrou erros de indexação em algum bloco passado +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 527ff10006faf2b03550f22436b1bb48d5786081 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:22 -0500 Subject: [PATCH 0240/1789] New translations graphql-api.mdx (Russian) --- .../ru/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/ru/subgraphs/querying/graphql-api.mdx b/website/src/pages/ru/subgraphs/querying/graphql-api.mdx index cf058623eacf..50c71a910485 100644 --- a/website/src/pages/ru/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/ru/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## Что такое GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. 
-To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries with GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. -Это может быть полезно, если Вы хотите получить только объекты, которые изменились, например, с момента последнего опроса. Или, в качестве альтернативы, может быть полезно исследовать или отладить изменнения объектов в Вашем субграфе (в сочетании с фильтрацией блоков Вы можете изолировать только объекты, которые изменились в определенном блоке). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). ```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### Полнотекстовые поисковые запросы -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Полнотекстовые поисковые операторы: -| Символ | Оператор | Описание | -| --- | --- | --- | -| `&` | `And` | Для объединения нескольких условий поиска в фильтр для объектов, которые включают все указанные условия | -| | | `Or` | Запросы с несколькими условиями поиска, разделенные оператором or, вернут все объекты, которые соответствуют любому из предоставленных условий | -| `<->` | `Follow by` | Укажите расстояние между двумя словами. 
| -| `:*` | `Prefix` | Используйте поисковый запрос по префиксу, чтобы найти слова с соответствующим префиксом (необходимо 2 символа) | +| Символ | Оператор | Описание | +| ------ | ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | +| `&` | `And` | Для объединения нескольких условий поиска в фильтр для объектов, которые включают все указанные условия | +| | | `Or` | Запросы с несколькими условиями поиска, разделенные оператором or, вернут все объекты, которые соответствуют любому из предоставленных условий | +| `<->` | `Follow by` | Укажите расстояние между двумя словами. | +| `:*` | `Prefix` | Используйте поисковый запрос по префиксу, чтобы найти слова с соответствующим префиксом (необходимо 2 символа) | #### Примеры @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### Метаданные субграфа -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -Если предоставлен блок, метаданные относятся к этому блоку, в противном случае используется последний проиндексированный блок. Если предоставляется блок, он должен быть после начального блока субграфа и меньше или равен последнему проиндексированному блоку. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. 
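The fulltext operator table restated in the hunk above is hard to scan inside a diff, so the sketch below shows how the `&`, `<->`, and `:*` operators look in the required `text` argument; `blogSearch` is a hypothetical fulltext field (such fields are defined per Subgraph in the schema, as the linked docs explain), and the search terms are placeholders.

```graphql
{
  # `&` requires a match on both terms.
  both: blogSearch(text: "graph & index") {
    id
  }
  # `<->` matches "decentralized" followed by "data".
  phrase: blogSearch(text: "decentralized <-> data") {
    id
  }
  # `:*` matches words that start with "index" (at least 2 characters required).
  prefix: blogSearch(text: "index:*") {
    id
  }
}
```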
@@ -427,6 +427,6 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s - hash: хэш блока - number: номер блока -- timestamp: временная метка блока, если она доступна (в настоящее время доступна только для субграфов, индексирующих сети EVM) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 4e1b497cb2b5141877f1a47d6a83619d5c3e5962 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:23 -0500 Subject: [PATCH 0241/1789] New translations graphql-api.mdx (Swedish) --- .../sv/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/sv/subgraphs/querying/graphql-api.mdx b/website/src/pages/sv/subgraphs/querying/graphql-api.mdx index e4c1fbcb94b3..17995ee09698 100644 --- a/website/src/pages/sv/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/sv/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## What is GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries with GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. -Detta kan vara användbart om du bara vill hämta enheter som har ändrats, till exempel sedan den senaste gången du pollade. Eller alternativt kan det vara användbart för att undersöka eller felsöka hur enheter förändras i din undergraf (om det kombineras med ett blockfilter kan du isolera endast enheter som ändrades i ett visst block). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). 
```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### Fulltextsökförfrågningar -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Fulltextsökoperatorer: -| Symbol | Operatör | Beskrivning | -| --- | --- | --- | -| `&` | `And` | För att kombinera flera söktermer till ett filter för entiteter som inkluderar alla de angivna termerna | -| | | `Or` | Förfrågningar med flera söktermer separerade av ellipsen kommer att returnera alla entiteter med en matchning från någon av de angivna termerna | -| `<->` | `Follow by` | Ange avståndet mellan två ord. | -| `:*` | `Prefix` | Använd prefixsöktermen för att hitta ord vars prefix matchar (2 tecken krävs.) | +| Symbol | Operatör | Beskrivning | +| ------ | ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | +| `&` | `And` | För att kombinera flera söktermer till ett filter för entiteter som inkluderar alla de angivna termerna | +| | | `Or` | Förfrågningar med flera söktermer separerade av ellipsen kommer att returnera alla entiteter med en matchning från någon av de angivna termerna | +| `<->` | `Follow by` | Ange avståndet mellan två ord. | +| `:*` | `Prefix` | Använd prefixsöktermen för att hitta ord vars prefix matchar (2 tecken krävs.) | #### Exempel @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### Metadata för undergrafer -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. 
This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -Om ett block anges är metadata från det blocket, om inte används det senast indexerade blocket. Om det anges måste blocket vara efter undergrafens startblock och mindre än eller lika med det senast indexerade blocket. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. @@ -427,6 +427,6 @@ Om ett block anges är metadata från det blocket, om inte används det senast i - hash: blockets hash - nummer: blockets nummer -- timestamp: blockets timestamp, om tillgänglig (detta är för närvarande endast tillgängligt för undergrafer som indexerar EVM-nätverk) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 25f33e4a9c6aeaa5131061f95ba691a032ec72b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:24 -0500 Subject: [PATCH 0242/1789] New translations graphql-api.mdx (Turkish) --- .../tr/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/tr/subgraphs/querying/graphql-api.mdx b/website/src/pages/tr/subgraphs/querying/graphql-api.mdx index 265c755683b9..73476546e410 100644 --- a/website/src/pages/tr/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/tr/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## What is GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries with GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. 
-Örneğin bu, son yoklamanızdan bu yana yalnızca değişen varlıkları almak istiyorsanız yararlı olabilir. Ya da alternatif olarak, subgraph'ınızda varlıkların nasıl değiştiğini araştırmak veya hata ayıklamak için yararlı olabilir (bir blok filtresiyle birleştirilirse, yalnızca belirli bir blokta değişen varlıkları izole edebilirsiniz). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). ```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### Tam Metin Arama Sorguları -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Tam metin arama operatörleri: -| Symbol | Operator | Tanım | -| --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | +| Symbol | Operator | Tanım | +| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `Follow by` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | #### Örnekler @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. 
The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### Subgraph Üst Verisi -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -Eğer bir blok belirtilirse, üst veri o blokla ilgilidir; belirtilmezse en son dizinlenen blok dikkate alınır. Eğer belirtilirse, blok subgraph başlangıç bloğundan sonra olmalıdır ve en son indekslenen bloğa eşit veya daha küçük olmalıdır. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. @@ -427,6 +427,6 @@ Eğer bir blok belirtilirse, üst veri o blokla ilgilidir; belirtilmezse en son - hash: the hash of the block - number: the block number -- timestamp: the timestamp of the block, if available (this is currently only available for subgraphs indexing EVM networks) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From adb620f2911dd90d50081f6f5a7267b61c5f669b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:25 -0500 Subject: [PATCH 0243/1789] New translations graphql-api.mdx (Ukrainian) --- .../uk/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/uk/subgraphs/querying/graphql-api.mdx b/website/src/pages/uk/subgraphs/querying/graphql-api.mdx index b3003ece651a..b82afcfa252c 100644 --- a/website/src/pages/uk/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/uk/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## What is GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). 
## Queries with GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. -This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). ```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### Fulltext Search Queries -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Fulltext search operators: -| Symbol | Operator | Description | -| --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | +| Symbol | Operator | Description | +| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `Follow by` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | #### Examples @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. 
the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### Subgraph Metadata -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. @@ -427,6 +427,6 @@ If a block is provided, the metadata is as of that block, if not the latest inde - hash: the hash of the block - number: the block number -- timestamp: the timestamp of the block, if available (this is currently only available for subgraphs indexing EVM networks) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 27942827c4d7e4485d2877b042cfe334e3ab7187 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:26 -0500 Subject: [PATCH 0244/1789] New translations graphql-api.mdx (Chinese Simplified) --- .../zh/subgraphs/querying/graphql-api.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/zh/subgraphs/querying/graphql-api.mdx b/website/src/pages/zh/subgraphs/querying/graphql-api.mdx index 450adf6248ff..35547d0170e8 100644 --- a/website/src/pages/zh/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/zh/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## What is GraphQL? 
-[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries with GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. -如果您只想获取已经更改的实体,例如自上次轮询以来改变的实体,那么这将非常有用。或者也可以调查或调试子图中实体的变化情况(如果与区块过滤器结合使用,则只能隔离在特定区块中发生变化的实体)。 +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). ```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### 全文搜索查询 -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. 全文搜索运算符: -| 符号 | 运算符 | 描述 | -| ------ | ----------- | ---------------------------------------------------------------------- | -| `&` | `And` | 用于将多个搜索词组合到包含所有提供词条的实体的过滤器中 | +| 符号 | 运算符 | 描述 | +| ------ | ----------- | ------------------------------------- | +| `&` | `And` | 用于将多个搜索词组合到包含所有提供词条的实体的过滤器中 | | | | `Or` | 由 or 运算符分隔的多个搜索词的查询,将返回与任何提供的词匹配的所有实体 | -| `<->` | `Follow by` | 指定两个单词之间的距离。 | -| `:*` | `Prefix` | 使用前缀搜索词查找前缀匹配的单词(需要 2 个字符) | +| `<->` | `Follow by` | 指定两个单词之间的距离。 | +| `:*` | `Prefix` | 使用前缀搜索词查找前缀匹配的单词(需要 2 个字符) | #### 例子 @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). 
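The context line above notes that the queryable entity types are declared in GraphQL IDL, and the surrounding hunks add that types carrying the `@entity` directive become entities with auto-generated root `Query` fields. A hypothetical `schema.graphql` excerpt illustrating that relationship is sketched below; the `Token` type and its fields are assumptions for illustration only.

```graphql
# Hypothetical entity declaration: Graph Node would generate `token(id: ...)` and
# `tokens(...)` fields on the root Query type from a type like this.
type Token @entity {
  id: ID!
  owner: Bytes!
  amount: BigInt!
}
```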
-GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### 子图元数据 -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -如果提供了区块,则元数据为该区块的元数据,如果未使用最新的索引区块。如果提供,则区块必须在子图的起始区块之后,并且小于或等于最近索引的区块。 +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. @@ -427,6 +427,6 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s - hash:区块的哈希 - number:区块编号 -- timestamp:区块的时间戳(如果可用)(当前仅适用于索引EVM网络的子图) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 281cad18cdef4375bda9fff11b73b363aff79efc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:28 -0500 Subject: [PATCH 0245/1789] New translations graphql-api.mdx (Urdu (Pakistan)) --- .../ur/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/ur/subgraphs/querying/graphql-api.mdx b/website/src/pages/ur/subgraphs/querying/graphql-api.mdx index d981496823bc..001e215cace8 100644 --- a/website/src/pages/ur/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/ur/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## What is GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). 
+To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries with GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. -یہ کارآمد ہو سکتا ہے اگر آپ صرف ان ہستیوں کو لانے کے خواہاں ہیں جو تبدیل ہو چکی ہیں، مثال کے طور پر آخری بار جب آپ نے پول کیا تھا۔ یا متبادل طور پر یہ تحقیق کرنا یا ڈیبگ کرنا مفید ہو سکتا ہے کہ آپ کے سب گراف میں ہستی کیسے تبدیل ہو رہی ہیں (اگر بلاک فلٹر کے ساتھ ملایا جائے تو، آپ صرف ان ہستیوں کو الگ تھلگ کر سکتے ہیں جو ایک مخصوص بلاک میں تبدیل ہوئی ہیں). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). ```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### فل ٹیکسٹ تلاش کے کیوریز -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. فل ٹیکسٹ سرچ آپریٹرز: -| علامت | آپریٹر | تفصیل | -| --- | --- | --- | -| `&` | `And` | ایک سے زیادہ تلاش کی اصطلاحات کو ایک فلٹر میں یکجا کرنے کے لیے ان ہستیوں کے لیے جس میں فراہم کردہ تمام اصطلاحات شامل ہوں | -| | | `Or` | Or آپریٹر کے ذریعہ الگ کردہ متعدد تلاش کی اصطلاحات کے ساتھ کیوریز فراہم کردہ شرائط میں سے کسی سے بھی مماثلت کے ساتھ تمام ہستیوں کو واپس کریں گے | -| `<->` | `Follow by` | دو الفاظ کے درمیان فاصلہ بتائیں. | -| `:*` | `Prefix` | ایسے الفاظ تلاش کرنے کے لیے پریفکس ​​تلاش کی اصطلاح استعمال کریں جن کا سابقہ ​​مماثل ہو (۲ حروف درکار ہیں.) | +| علامت | آپریٹر | تفصیل | +| ------ | ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | +| `&` | `And` | ایک سے زیادہ تلاش کی اصطلاحات کو ایک فلٹر میں یکجا کرنے کے لیے ان ہستیوں کے لیے جس میں فراہم کردہ تمام اصطلاحات شامل ہوں | +| | | `Or` | Or آپریٹر کے ذریعہ الگ کردہ متعدد تلاش کی اصطلاحات کے ساتھ کیوریز فراہم کردہ شرائط میں سے کسی سے بھی مماثلت کے ساتھ تمام ہستیوں کو واپس کریں گے | +| `<->` | `Follow by` | دو الفاظ کے درمیان فاصلہ بتائیں. 
| +| `:*` | `Prefix` | ایسے الفاظ تلاش کرنے کے لیے پریفکس ​​تلاش کی اصطلاح استعمال کریں جن کا سابقہ ​​مماثل ہو (۲ حروف درکار ہیں.) | #### مثالیں @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### سب گراف میٹا ڈیٹا -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -اگر کوئی بلاک فراہم کیا جاتا ہے تو، میٹا ڈیٹا اس بلاک کا ہوتا ہے، اگر تازہ ترین انڈیکسڈ بلاک استعمال نہیں کیا جاتا ہے۔ اگر فراہم کیا گیا ہو، تو بلاک سب گراف کے اسٹارٹ بلاک کے بعد ہونا چاہیے، اور حال ہی میں انڈیکس کیے گئے بلاک سے کم یا اس کے برابر ہونا چاہیے. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. 
@@ -427,6 +427,6 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s - ہیش: بلاک کی ہیش - نمبر: بلاک نمبر -- ٹائم اسٹیمپ: بلاک کا ٹائم اسٹیمپ، اگر دستیاب ہو (یہ فی الحال صرف ای وی ایم نیٹ ورکس کو انڈیکس کرنے والے سب گرافس کے لیے دستیاب ہے) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From d890b1f9590420f3128cd4c04b9c127772adb50f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:29 -0500 Subject: [PATCH 0246/1789] New translations graphql-api.mdx (Vietnamese) --- .../vi/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/vi/subgraphs/querying/graphql-api.mdx b/website/src/pages/vi/subgraphs/querying/graphql-api.mdx index 3056a573e67f..459f22ec299c 100644 --- a/website/src/pages/vi/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/vi/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## What is GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries with GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. -This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). 
```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### Fulltext Search Queries -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Fulltext search operators: -| Biểu tượng | Toán tử | Miêu tả | -| --- | --- | --- | -| `&` | `And` | Để kết hợp nhiều cụm từ tìm kiếm thành một bộ lọc cho các thực thể bao gồm tất cả các cụm từ được cung cấp | -| | | `Or` | Các truy vấn có nhiều cụm từ tìm kiếm được phân tách bằng toán tử hoặc sẽ trả về tất cả các thực thể có kết quả khớp với bất kỳ cụm từ nào được cung cấp | -| `<->` | `Follow by` | Chỉ định khoảng cách giữa hai từ. | -| `:*` | `Prefix` | Sử dụng cụm từ tìm kiếm tiền tố để tìm các từ có tiền tố khớp với nhau (yêu cầu 2 ký tự.) | +| Biểu tượng | Toán tử | Miêu tả | +| ---------- | ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `&` | `And` | Để kết hợp nhiều cụm từ tìm kiếm thành một bộ lọc cho các thực thể bao gồm tất cả các cụm từ được cung cấp | +| | | `Or` | Các truy vấn có nhiều cụm từ tìm kiếm được phân tách bằng toán tử hoặc sẽ trả về tất cả các thực thể có kết quả khớp với bất kỳ cụm từ nào được cung cấp | +| `<->` | `Follow by` | Chỉ định khoảng cách giữa hai từ. | +| `:*` | `Prefix` | Sử dụng cụm từ tìm kiếm tiền tố để tìm các từ có tiền tố khớp với nhau (yêu cầu 2 ký tự.) | #### Examples @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### Subgraph Metadata -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. 
This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. @@ -427,6 +427,6 @@ If a block is provided, the metadata is as of that block, if not the latest inde - hash: the hash of the block - number: the block number -- timestamp: the timestamp of the block, if available (this is currently only available for subgraphs indexing EVM networks) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 2c4577862a3d60aa067a9e1790599d93d73f115f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:30 -0500 Subject: [PATCH 0247/1789] New translations graphql-api.mdx (Marathi) --- .../mr/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/mr/subgraphs/querying/graphql-api.mdx b/website/src/pages/mr/subgraphs/querying/graphql-api.mdx index c506e4c260a8..0cb4f07b2393 100644 --- a/website/src/pages/mr/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/mr/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ Learn about the GraphQL Query API used in The Graph. ## What is GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## Queries with GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. 
-This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). ```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### Fulltext Search Queries -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Fulltext search operators: -| Symbol | Operator | वर्णन | -| --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | +| Symbol | Operator | वर्णन | +| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `Follow by` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | #### Examples @@ -391,7 +391,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. 
The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -403,7 +403,7 @@ All GraphQL types with `@entity` directives in your schema will be treated as en ### Subgraph Metadata -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,7 +419,7 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. @@ -427,6 +427,6 @@ If a block is provided, the metadata is as of that block, if not the latest inde - hash: the hash of the block - number: the block number -- timestamp: the timestamp of the block, if available (this is currently only available for subgraphs indexing EVM networks) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 3f73c9c46b5a3a6e7ca447f1875a29bd0cd9eb54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:31 -0500 Subject: [PATCH 0248/1789] New translations graphql-api.mdx (Hindi) --- .../hi/subgraphs/querying/graphql-api.mdx | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/website/src/pages/hi/subgraphs/querying/graphql-api.mdx b/website/src/pages/hi/subgraphs/querying/graphql-api.mdx index ecfc90819e64..15a8f1fc4f0b 100644 --- a/website/src/pages/hi/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/hi/subgraphs/querying/graphql-api.mdx @@ -6,13 +6,13 @@ The Graph में उपयोग किए जाने वाले GraphQL ## GraphQL क्या है? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). ## GraphQL के साथ क्वेरीज़ -In your subgraph schema you define types called `Entities`. 
For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. > Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. @@ -170,7 +170,7 @@ You can use suffixes like `_gt`, `_lte` for value comparison: You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. -This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). ```graphql { @@ -329,18 +329,18 @@ This query will return `Challenge` entities, and their associated `Application` ### पूर्ण पाठ खोज प्रश्न -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. 
पूर्ण पाठ खोज ऑपरेटर: -| प्रतीक | ऑपरेटर | Description | -| --- | --- | --- | -| `&` | `And` | सभी प्रदान किए गए शब्दों को शामिल करने वाली संस्थाओं के लिए एक से अधिक खोज शब्दों को फ़िल्टर में संयोजित करने के लिए | -| | | `Or` | या ऑपरेटर द्वारा अलग किए गए एकाधिक खोज शब्दों वाली क्वेरी सभी संस्थाओं को प्रदान की गई शर्तों में से किसी से मेल के साथ वापस कर देगी | -| `<->` | `Follow by` | दो शब्दों के बीच की दूरी निर्दिष्ट करें। | -| `:*` | `Prefix` | उन शब्दों को खोजने के लिए उपसर्ग खोज शब्द का उपयोग करें जिनके उपसर्ग मेल खाते हैं (2 वर्ण आवश्यक हैं।) | +| प्रतीक | ऑपरेटर | Description | +| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | सभी प्रदान किए गए शब्दों को शामिल करने वाली संस्थाओं के लिए एक से अधिक खोज शब्दों को फ़िल्टर में संयोजित करने के लिए | +| | | `Or` | या ऑपरेटर द्वारा अलग किए गए एकाधिक खोज शब्दों वाली क्वेरी सभी संस्थाओं को प्रदान की गई शर्तों में से किसी से मेल के साथ वापस कर देगी | +| `<->` | `Follow by` | दो शब्दों के बीच की दूरी निर्दिष्ट करें। | +| `:*` | `Prefix` | उन शब्दों को खोजने के लिए उपसर्ग खोज शब्द का उपयोग करें जिनके उपसर्ग मेल खाते हैं (2 वर्ण आवश्यक हैं।) | #### उदाहरण @@ -391,7 +391,7 @@ Graph Node अपने द्वारा प्राप्त GraphQL क् आपके डेटा स्रोतों का स्कीमा, अर्थात् उपलब्ध प्रश्न करने के लिए संस्थाओं की प्रकार, मान और उनके बीच के संबंध, GraphQL Interface Definition Language (IDL)(https://facebook.github.io/graphql/draft/#sec-Type-System) के माध्यम से परिभाषित किए गए हैं। -GraphQL स्कीमा आम तौर पर queries, subscriptions और mutations के लिए रूट प्रकार परिभाषित करते हैं। The Graph केवल queries का समर्थन करता है। आपके सबग्राफ के लिए रूट Query प्रकार स्वचालित रूप से उस GraphQL स्कीमा से उत्पन्न होता है जो आपके सबग्राफ manifest(/developing/creating-a-subgraph/#components-of-a-subgraph) में शामिल होता है। +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). > ध्यान दें: हमारा एपीआई म्यूटेशन को उजागर नहीं करता है क्योंकि डेवलपर्स से उम्मीद की जाती है कि वे अपने एप्लिकेशन से अंतर्निहित ब्लॉकचेन के खिलाफ सीधे लेन-देन(transaction) जारी करेंगे। @@ -403,7 +403,7 @@ GraphQL स्कीमा आम तौर पर queries, subscriptions और ### सबग्राफ मेटाडेटा -सभी सबग्राफमें एक स्वचालित रूप से जनरेट किया गया _Meta_ ऑब्जेक्ट होता है, जो Subgraph मेटाडेटा तक पहुँच प्रदान करता है। इसे इस प्रकार क्वेरी किया जा सकता है: +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: ```graphQL { @@ -419,14 +419,14 @@ GraphQL स्कीमा आम तौर पर queries, subscriptions और } ``` -If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. 
deployment एक विशिष्ट ID है, जो subgraph.yaml फ़ाइल के IPFS CID के अनुरूप है। -block नवीनतम ब्लॉक के बारे में जानकारी प्रदान करता है (किसी भी ब्लॉक सीमाओं को ध्यान में रखते हुए जो कि \_meta में पास की जाती हैं): +block नवीनतम ब्लॉक के बारे में जानकारी प्रदान करता है (किसी भी ब्लॉक सीमाओं को ध्यान में रखते हुए जो कि _meta में पास की जाती हैं): - हैश: ब्लॉक का हैश - नंबर: ब्लॉक नंबर -- टाइमस्टैम्प: ब्लॉक का टाइमस्टैम्प, यदि उपलब्ध हो (यह वर्तमान में केवल ईवीएम नेटवर्क को इंडेक्स करने वाले सबग्राफ के लिए उपलब्ध है) +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) -hasIndexingErrors एक बूलियन है जो यह पहचानता है कि क्या सबग्राफ ने किसी पिछले ब्लॉक पर इंडेक्सिंग त्रुटियों का सामना किया था। +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 8388a681118c9803636d22998d66affc896a2b50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:32 -0500 Subject: [PATCH 0249/1789] New translations graphql-api.mdx (Swahili) --- .../sw/subgraphs/querying/graphql-api.mdx | 432 ++++++++++++++++++ 1 file changed, 432 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/querying/graphql-api.mdx diff --git a/website/src/pages/sw/subgraphs/querying/graphql-api.mdx b/website/src/pages/sw/subgraphs/querying/graphql-api.mdx new file mode 100644 index 000000000000..b82afcfa252c --- /dev/null +++ b/website/src/pages/sw/subgraphs/querying/graphql-api.mdx @@ -0,0 +1,432 @@ +--- +title: GraphQL API +--- + +Learn about the GraphQL Query API used in The Graph. + +## What is GraphQL? + +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. + +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a Subgraph](/developing/creating-a-subgraph/). + +## Queries with GraphQL + +In your Subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. + +> Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. + +### Examples + +Query for a single `Token` entity defined in your schema: + +```graphql +{ + token(id: "1") { + id + owner + } +} +``` + +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. + +Query all `Token` entities: + +```graphql +{ + tokens { + id + owner + } +} +``` + +### Sorting + +When querying a collection, you may: + +- Use the `orderBy` parameter to sort by a specific attribute. +- Use the `orderDirection` to specify the sort direction, `asc` for ascending or `desc` for descending. + +#### Example + +```graphql +{ + tokens(orderBy: price, orderDirection: asc) { + id + owner + } +} +``` + +#### Example for nested entity sorting + +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. + +The following example shows tokens sorted by the name of their owner: + +```graphql +{ + tokens(orderBy: owner__name, orderDirection: asc) { + id + owner { + name + } + } +} +``` + +> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. 
Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. + +### Pagination + +When querying a collection, it's best to: + +- Use the `first` parameter to paginate from the beginning of the collection. + - The default sort order is by `ID` in ascending alphanumeric order, **not** by creation time. +- Use the `skip` parameter to skip entities and paginate. For instance, `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. +- Avoid using `skip` values in queries because they generally perform poorly. To retrieve a large number of items, it's best to page through entities based on an attribute as shown in the previous example above. + +#### Example using `first` + +Query the first 10 tokens: + +```graphql +{ + tokens(first: 10) { + id + owner + } +} +``` + +To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. + +#### Example using `first` and `skip` + +Query 10 `Token` entities, offset by 10 places from the beginning of the collection: + +```graphql +{ + tokens(first: 10, skip: 10) { + id + owner + } +} +``` + +#### Example using `first` and `id_ge` + +If a client needs to retrieve a large number of entities, it's more performant to base queries on an attribute and filter by that attribute. For example, a client could retrieve a large number of tokens using this query: + +```graphql +query manyTokens($lastID: String) { + tokens(first: 1000, where: { id_gt: $lastID }) { + id + owner + } +} +``` + +The first time, it would send the query with `lastID = ""`, and for subsequent requests it would set `lastID` to the `id` attribute of the last entity in the previous request. This approach will perform significantly better than using increasing `skip` values. + +### Filtering + +- You can use the `where` parameter in your queries to filter for different properties. +- You can filter on multiple values within the `where` parameter. + +#### Example using `where` + +Query challenges with `failed` outcome: + +```graphql +{ + challenges(where: { outcome: "failed" }) { + challenger + outcome + application { + id + } + } +} +``` + +You can use suffixes like `_gt`, `_lte` for value comparison: + +#### Example for range filtering + +```graphql +{ + applications(where: { deposit_gt: "10000000000" }) { + id + whitelisted + deposit + } +} +``` + +#### Example for block filtering + +You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. + +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your Subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). + +```graphql +{ + applications(where: { _change_block: { number_gte: 100 } }) { + id + whitelisted + deposit + } +} +``` + +#### Example for nested entity filtering + +Filtering on the basis of nested entities is possible in the fields with the `_` suffix. + +This can be useful if you are looking to fetch only entities whose child-level entities meet the provided conditions. 
+ +```graphql +{ + challenges(where: { application_: { id: 1 } }) { + challenger + outcome + application { + id + } + } +} +``` + +#### Logical operators + +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. + +##### `AND` Operator + +The following example filters for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. + +```graphql +{ + challenges(where: { and: [{ number_gte: 100 }, { outcome: "succeeded" }] }) { + challenger + outcome + application { + id + } + } +} +``` + +> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. +> +> ```graphql +> { +> challenges(where: { number_gte: 100, outcome: "succeeded" }) { +> challenger +> outcome +> application { +> id +> } +> } +> } +> ``` + +##### `OR` Operator + +The following example filters for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. + +```graphql +{ + challenges(where: { or: [{ number_gte: 100 }, { outcome: "succeeded" }] }) { + challenger + outcome + application { + id + } + } +} +``` + +> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. + +#### All Filters + +Full list of parameter suffixes: + +``` +_ +_not +_gt +_lt +_gte +_lte +_in +_not_in +_contains +_contains_nocase +_not_contains +_not_contains_nocase +_starts_with +_starts_with_nocase +_ends_with +_ends_with_nocase +_not_starts_with +_not_starts_with_nocase +_not_ends_with +_not_ends_with_nocase +``` + +> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. + +In addition, the following global filters are available as part of `where` argument: + +```graphql +_change_block(number_gte: Int) +``` + +### Time-travel queries + +You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. + +The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to **not** be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. + +> Note: The current implementation is still subject to certain limitations that might violate these guarantees. 
The implementation can not always tell that a given block hash is not on the main chain at all, or if a query result by a block hash for a block that is not yet considered final could be influenced by a block reorganization running concurrently with the query. They do not affect the results of queries by block hash when the block is final and known to be on the main chain. [This issue](https://github.com/graphprotocol/graph-node/issues/1405) explains what these limitations are in detail. + +#### Example + +```graphql +{ + challenges(block: { number: 8000000 }) { + challenger + outcome + application { + id + } + } +} +``` + +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. + +#### Example + +```graphql +{ + challenges(block: { hash: "0x5a0b54d5dc17e0aadc383d2db43b0a0d3e029c4c" }) { + challenger + outcome + application { + id + } + } +} +``` + +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. + +### Fulltext Search Queries + +Fulltext search query fields provide an expressive text search API that can be added to the Subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your Subgraph. + +Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. + +Fulltext search operators: + +| Symbol | Operator | Description | +| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `Follow by` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | + +#### Examples + +Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields. + +```graphql +{ + blogSearch(text: "anarchism | crumpets") { + id + title + body + author + } +} +``` + +The `follow by` operator specifies a words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy" + +```graphql +{ + blogSearch(text: "decentralized <-> philosophy") { + id + title + body + author + } +} +``` + +Combine fulltext operators to make more complex filters. With a pretext search operator combined with a follow by this example query will match all blog entities with words that start with "lou" followed by "music". 
+ +```graphql +{ + blogSearch(text: "lou:* <-> music") { + id + title + body + author + } +} +``` + +### Validation + +Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more. + +## Schema + +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). + +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your Subgraph is automatically generated from the GraphQL schema that's included in your [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). + +> Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. + +### Entities + +All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. + +> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. + +### Subgraph Metadata + +All Subgraphs have an auto-generated `_Meta_` object, which provides access to Subgraph metadata. This can be queried as follows: + +```graphQL +{ + _meta(block: { number: 123987 }) { + block { + number + hash + timestamp + } + deployment + hasIndexingErrors + } +} +``` + +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the Subgraph's start block, and less than or equal to the most recently indexed block. + +`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. 
+ +`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): + +- hash: the hash of the block +- number: the block number +- timestamp: the timestamp of the block, if available (this is currently only available for Subgraphs indexing EVM networks) + +`hasIndexingErrors` is a boolean identifying whether the Subgraph encountered indexing errors at some past block From 42d02dea0eff69147f216b1616762f2f026d35cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:33 -0500 Subject: [PATCH 0250/1789] New translations python.mdx (Romanian) --- website/src/pages/ro/subgraphs/querying/python.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ro/subgraphs/querying/python.mdx b/website/src/pages/ro/subgraphs/querying/python.mdx index 0937e4f7862d..ed0d078a4175 100644 --- a/website/src/pages/ro/subgraphs/querying/python.mdx +++ b/website/src/pages/ro/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. @@ -17,14 +17,14 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
```python from subgrounds import Subgrounds sg = Subgrounds() -# Load the subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") From c86047c795046b91bcd612eda170612ac7764024 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:34 -0500 Subject: [PATCH 0251/1789] New translations python.mdx (French) --- website/src/pages/fr/subgraphs/querying/python.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/fr/subgraphs/querying/python.mdx b/website/src/pages/fr/subgraphs/querying/python.mdx index f8d2b0741c18..3822aadae1ce 100644 --- a/website/src/pages/fr/subgraphs/querying/python.mdx +++ b/website/src/pages/fr/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Interroger The Graph avec Python et Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds est une librairie Python utilisée pour les requêtes Subgraph. Cette librairie a été conçue par [Playgrounds](https://playgrounds.network/). Subgrounds permet de connecter directement les données d'un Subgraph à un environnement de données Python, permettant l'utilisation de librairies comme [pandas](https://pandas.pydata.org/) afin de faire de l'analyse de données! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds propose une API Python simplifiée afin de construire des requêtes GraphQL. Subgrounds automatise les workflows fastidieux comme la pagination, et donne aux utilisateurs avancés plus de pouvoir grâce à des transformations de schéma contrôlées. @@ -17,14 +17,14 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Une fois installé, vous pouvez tester Subgrounds avec la requête suivante. La requête ci-dessous récupère un Subgraph pour le protocole Aave v2 et interroge les 5 principaux marchés par TVL (Total Value Locked - Valeur Totale Verouillée), sélectionne leur nom et leur TVL (en USD) et renvoie les données sous forme de DataFrame Panda [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). ```python from subgrounds import Subgrounds sg = Subgrounds() -# Load the subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") @@ -54,4 +54,4 @@ Subgrounds est développé et maintenu par l'équipe de [Playgrounds](https://pl - [Requêtes concurrentes](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Améliorez vos requêtes en les parallélisant. - [Export de données en CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - A quick article on how to seamlessly save your data as CSVs for further analysis. + - Un article rapide sur la manière d'enregistrer de manière transparente vos données au format CSV en vue d'une analyse ultérieure. 
From 26216697568eca575f6e903e05ab8b13e5ea0616 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:35 -0500 Subject: [PATCH 0252/1789] New translations python.mdx (Spanish) --- website/src/pages/es/subgraphs/querying/python.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/es/subgraphs/querying/python.mdx b/website/src/pages/es/subgraphs/querying/python.mdx index d51fd5deb007..4f2ad9280b58 100644 --- a/website/src/pages/es/subgraphs/querying/python.mdx +++ b/website/src/pages/es/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. @@ -17,14 +17,14 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). ```python from subgrounds import Subgrounds sg = Subgrounds() -# Load the subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") From 6609f7d17c905b755b4c224b94484ef0bba6f678 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:36 -0500 Subject: [PATCH 0253/1789] New translations python.mdx (Arabic) --- website/src/pages/ar/subgraphs/querying/python.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ar/subgraphs/querying/python.mdx b/website/src/pages/ar/subgraphs/querying/python.mdx index 0937e4f7862d..ed0d078a4175 100644 --- a/website/src/pages/ar/subgraphs/querying/python.mdx +++ b/website/src/pages/ar/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! 
+Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. @@ -17,14 +17,14 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). ```python from subgrounds import Subgrounds sg = Subgrounds() -# Load the subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") From b29d0f6676da38c3a2bc0d5e4488b2e927dd5799 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:37 -0500 Subject: [PATCH 0254/1789] New translations python.mdx (Czech) --- website/src/pages/cs/subgraphs/querying/python.mdx | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/website/src/pages/cs/subgraphs/querying/python.mdx b/website/src/pages/cs/subgraphs/querying/python.mdx index 669e95c19183..51e3b966a2b5 100644 --- a/website/src/pages/cs/subgraphs/querying/python.mdx +++ b/website/src/pages/cs/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds je intuitivní knihovna Pythonu pro dotazování na podgrafy, vytvořená [Playgrounds](https://playgrounds.network/). Umožňuje přímo připojit data subgrafů k datovému prostředí Pythonu, což vám umožní používat knihovny jako [pandas](https://pandas.pydata.org/) k provádění analýzy dat! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds nabízí jednoduché Pythonic API pro vytváření dotazů GraphQL, automatizuje zdlouhavé pracovní postupy, jako je stránkování, a umožňuje pokročilým uživatelům řízené transformace schémat. @@ -17,24 +17,24 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Po instalaci můžete vyzkoušet podklady pomocí následujícího dotazu. Následující příklad uchopí podgraf pro protokol Aave v2 a dotazuje se na 5 největších trhů seřazených podle TVL (Total Value Locked), vybere jejich název a jejich TVL (v USD) a vrátí data jako pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
+Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). ```python from subgrounds import Subgrounds sg = Subgrounds() -# Načtení podgrafu +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") -# Sestavte dotaz +# Construct the query latest_markets = aave_v2.Query.markets( orderBy=aave_v2.Market.totalValueLockedUSD, - orderDirection="desc", + orderDirection='desc', first=5, ) -# Vrátit dotaz do datového rámce +# Return query to a dataframe sg.query_df([ latest_markets.name, latest_markets.totalValueLockedUSD, From 4c5b2f4b9eda00ec7e0045474bb88af9d3618a0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:38 -0500 Subject: [PATCH 0255/1789] New translations python.mdx (German) --- website/src/pages/de/subgraphs/querying/python.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/de/subgraphs/querying/python.mdx b/website/src/pages/de/subgraphs/querying/python.mdx index a6640d513d6e..716b10005d26 100644 --- a/website/src/pages/de/subgraphs/querying/python.mdx +++ b/website/src/pages/de/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. @@ -17,14 +17,14 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
```python from subgrounds import Subgrounds sg = Subgrounds() -# Load the subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") From bb14b72d1cdd2ca006f6ad55df140116ab27146e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:39 -0500 Subject: [PATCH 0256/1789] New translations python.mdx (Italian) --- website/src/pages/it/subgraphs/querying/python.mdx | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/website/src/pages/it/subgraphs/querying/python.mdx b/website/src/pages/it/subgraphs/querying/python.mdx index 55cae50be8a9..c289ab7ea6b0 100644 --- a/website/src/pages/it/subgraphs/querying/python.mdx +++ b/website/src/pages/it/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds è una libreria Python intuitiva per query dei subgraph, realizzata da [Playgrounds](https://playgrounds.network/). Permette di collegare direttamente i dati dei subgraph a un ambiente dati Python, consentendo di utilizzare librerie come [pandas](https://pandas.pydata.org/) per eseguire analisi dei dati! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds offre una semplice API Pythonic per la creazione di query GraphQL, automatizza i flussi di lavoro più noiosi come la paginazione, e dà agli utenti avanzati la possibilità di effettuare trasformazioni controllate dello schema. @@ -17,24 +17,24 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Una volta installato, è possibile testare subgrounds con la seguente query. L'esempio seguente prende un subgraph per il protocollo Aave v2 e effettua query dei primi 5 mercati ordinati per TVL (Total Value Locked), seleziona il loro nome e il loro TVL (in USD) e restituisce i dati come pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
```python -da subgrounds import Subgrounds +from subgrounds import Subgrounds sg = Subgrounds() -# Caricare il subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") -# Costruire la query +# Construct the query latest_markets = aave_v2.Query.markets( orderBy=aave_v2.Market.totalValueLockedUSD, orderDirection='desc', first=5, ) -# Restituire la query a un dataframe +# Return query to a dataframe sg.query_df([ latest_markets.name, latest_markets.totalValueLockedUSD, From 5d605862e6ebf38795b42adf420672b09f57344c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:39 -0500 Subject: [PATCH 0257/1789] New translations python.mdx (Japanese) --- .../pages/ja/subgraphs/querying/python.mdx | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/src/pages/ja/subgraphs/querying/python.mdx b/website/src/pages/ja/subgraphs/querying/python.mdx index 4a42ae3275b4..cae61f4b49e0 100644 --- a/website/src/pages/ja/subgraphs/querying/python.mdx +++ b/website/src/pages/ja/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgroundsは、[Playgrounds](https://playgrounds.network/)によって構築された、サブグラフをクエリするための直感的なPythonライブラリです。サブグラフデータを直接Pythonデータ環境に接続し、[pandas](https://pandas.pydata.org/)のようなライブラリを使用してデータ分析を行うことができます! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgroundsは、GraphQLクエリを構築するためのシンプルなPythonic APIを提供し、ページ分割のような面倒なワークフローを自動化し、制御されたスキーマ変換によって高度なユーザーを支援します。 @@ -17,27 +17,27 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -インストールしたら、以下のクエリでsubgroundsを試すことができる。以下の例では、Aave v2 プロトコルのサブグラフを取得し、TVL (Total Value Locked) 順に並べられた上位 5 つの市場をクエリし、その名前と TVL (USD) を選択し、pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame) としてデータを返します。 +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
```python from subgrounds import Subgrounds sg = Subgrounds() -# サブグラフを読み込む +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") -# クエリの構築 +# Construct the query latest_markets = aave_v2.Query.markets( - orderBy=aave_v2.Market.totalValueLockedUSD、 - orderDirection='desc'、 - first=5、 + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, ) -# クエリをデータフレームに戻す +# Return query to a dataframe sg.query_df([ - latest_markets.name、 - latest_markets.totalValueLockedUSD、 + latest_markets.name, + latest_markets.totalValueLockedUSD, ]) ``` From 586ba599704f72d749e5ea366b978731a61c23c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:40 -0500 Subject: [PATCH 0258/1789] New translations python.mdx (Korean) --- website/src/pages/ko/subgraphs/querying/python.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ko/subgraphs/querying/python.mdx b/website/src/pages/ko/subgraphs/querying/python.mdx index 0937e4f7862d..ed0d078a4175 100644 --- a/website/src/pages/ko/subgraphs/querying/python.mdx +++ b/website/src/pages/ko/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. @@ -17,14 +17,14 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
```python from subgrounds import Subgrounds sg = Subgrounds() -# Load the subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") From 04d8dc685353167bd7e8fb593b298be3a7e90501 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:41 -0500 Subject: [PATCH 0259/1789] New translations python.mdx (Dutch) --- website/src/pages/nl/subgraphs/querying/python.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/nl/subgraphs/querying/python.mdx b/website/src/pages/nl/subgraphs/querying/python.mdx index 0937e4f7862d..ed0d078a4175 100644 --- a/website/src/pages/nl/subgraphs/querying/python.mdx +++ b/website/src/pages/nl/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. @@ -17,14 +17,14 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
```python from subgrounds import Subgrounds sg = Subgrounds() -# Load the subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") From b3e7e1e4e58c9e44c5b76eb7fc548fc0481603d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:42 -0500 Subject: [PATCH 0260/1789] New translations python.mdx (Polish) --- website/src/pages/pl/subgraphs/querying/python.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/pl/subgraphs/querying/python.mdx b/website/src/pages/pl/subgraphs/querying/python.mdx index 0937e4f7862d..ed0d078a4175 100644 --- a/website/src/pages/pl/subgraphs/querying/python.mdx +++ b/website/src/pages/pl/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. @@ -17,14 +17,14 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
```python from subgrounds import Subgrounds sg = Subgrounds() -# Load the subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") From 6e22bd19b8d8e04ce00c82a9558ada759806b13e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:43 -0500 Subject: [PATCH 0261/1789] New translations python.mdx (Portuguese) --- website/src/pages/pt/subgraphs/querying/python.mdx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/src/pages/pt/subgraphs/querying/python.mdx b/website/src/pages/pt/subgraphs/querying/python.mdx index ced5c995611e..2e0eb17b4379 100644 --- a/website/src/pages/pt/subgraphs/querying/python.mdx +++ b/website/src/pages/pt/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Queries no The Graph com Python e Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds é uma biblioteca intuitiva em Python para a construção de subgraphs, construída pela [Playgrounds](https://playgrounds.network/). Ela permite-lhe conectar diretamente dados de subgraph a um ambiente de dados em Python e usar bibliotecas como [pandas](https://pandas.pydata.org/) para realizar análises de dados! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! O Subgrounds oferece uma API simples para a construção de queries em GraphQL, automatiza fluxos de trabalho entediantes, como a paginação, e empodera utilizadores avançados via transformações controladas de schema. @@ -17,24 +17,24 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Quando instalado, você pode testar o subgrounds com o seguinte query. O exemplo a seguir coleta um subgraph do protocolo Aave v2 e realiza queries para os 5 maiores mercados ordenados por valor total bloqueado, seleciona os seus nomes e o seu VTB (em USD) e retorna os dados como um [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame) em pandas. +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
```python from subgrounds import Subgrounds sg = Subgrounds() -# Carrega o subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") -# Constrói o query +# Construct the query latest_markets = aave_v2.Query.markets( orderBy=aave_v2.Market.totalValueLockedUSD, orderDirection='desc', first=5, ) -# Retorna o query a um quadro de dados +# Return query to a dataframe sg.query_df([ latest_markets.name, latest_markets.totalValueLockedUSD, From 640e1780fea9da828ee6a33b7379f134a2d00082 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:44 -0500 Subject: [PATCH 0262/1789] New translations python.mdx (Russian) --- website/src/pages/ru/subgraphs/querying/python.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ru/subgraphs/querying/python.mdx b/website/src/pages/ru/subgraphs/querying/python.mdx index b450ba9276de..70dc87b4850e 100644 --- a/website/src/pages/ru/subgraphs/querying/python.mdx +++ b/website/src/pages/ru/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. @@ -17,14 +17,14 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
```python from subgrounds import Subgrounds sg = Subgrounds() -# Load the subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") From 3301963655bd0b9b1899d548168dd152eb08eec9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:45 -0500 Subject: [PATCH 0263/1789] New translations python.mdx (Swedish) --- website/src/pages/sv/subgraphs/querying/python.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/sv/subgraphs/querying/python.mdx b/website/src/pages/sv/subgraphs/querying/python.mdx index 213b45f144b3..3a987546c454 100644 --- a/website/src/pages/sv/subgraphs/querying/python.mdx +++ b/website/src/pages/sv/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. @@ -17,14 +17,14 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
```python from subgrounds import Subgrounds sg = Subgrounds() -# Load the subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") From 2feb86348a22a8e2a3f8a6c7d9798642ead23653 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:46 -0500 Subject: [PATCH 0264/1789] New translations python.mdx (Turkish) --- website/src/pages/tr/subgraphs/querying/python.mdx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/src/pages/tr/subgraphs/querying/python.mdx b/website/src/pages/tr/subgraphs/querying/python.mdx index dc82e0010623..25c22bea8534 100644 --- a/website/src/pages/tr/subgraphs/querying/python.mdx +++ b/website/src/pages/tr/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds, [Playgrounds](https://playgrounds.network/) tarafından oluşturulmuş, subgraph sorgulamak için kullanılan sezgisel bir Python kütüphanesidir. Bu kütüphane, subgraph verilerini doğrudan bir Python veri ortamına bağlamanıza olanak tanır ve [pandas](https://pandas.pydata.org/) gibi kütüphaneleri kullanarak veri analizi yapmanıza imkan sağlar! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds, GraphQL sorguları oluşturmak için sayfalandırma gibi sıkıcı iş akışlarını otomatikleştiren ve kontrollü şema dönüşümleri aracılığıyla ileri düzey kullanıcıları güçlendiren basit bir Pythonic API sunar. @@ -17,24 +17,24 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Kurulum tamamlandıktan sonra, aşağıdaki sorgu ile subgrounds'ı test edebilirsiniz. Aşağıdaki örnek, Aave v2 protokolü için bir subgraph çeker ve TVL'ye (Toplam Kilitli Varlık) göre sıralanan en üst 5 pazarı sorgular, adlarını ve TVL'lerini (USD cinsinden) seçer ve verileri bir pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame) olarak döndürür. +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
```python from subgrounds import Subgrounds sg = Subgrounds() -# Subgraph'ı yükleme +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") -# Sorguyu oluşturma +# Construct the query latest_markets = aave_v2.Query.markets( orderBy=aave_v2.Market.totalValueLockedUSD, orderDirection='desc', first=5, ) -# Sorguyu bir veri çerçevesine döndürme +# Return query to a dataframe sg.query_df([ latest_markets.name, latest_markets.totalValueLockedUSD, From d77672fbc453fda1ca2fd3341665fa5287bbac0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:47 -0500 Subject: [PATCH 0265/1789] New translations python.mdx (Ukrainian) --- website/src/pages/uk/subgraphs/querying/python.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/uk/subgraphs/querying/python.mdx b/website/src/pages/uk/subgraphs/querying/python.mdx index 0937e4f7862d..ed0d078a4175 100644 --- a/website/src/pages/uk/subgraphs/querying/python.mdx +++ b/website/src/pages/uk/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. @@ -17,14 +17,14 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
```python from subgrounds import Subgrounds sg = Subgrounds() -# Load the subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") From ff7bc1be8218817c539db397f38e918f3d640e0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:48 -0500 Subject: [PATCH 0266/1789] New translations python.mdx (Chinese Simplified) --- website/src/pages/zh/subgraphs/querying/python.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/zh/subgraphs/querying/python.mdx b/website/src/pages/zh/subgraphs/querying/python.mdx index a1372fbf300d..3efd363546d9 100644 --- a/website/src/pages/zh/subgraphs/querying/python.mdx +++ b/website/src/pages/zh/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. @@ -17,14 +17,14 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
```python from subgrounds import Subgrounds sg = Subgrounds() -# Load the subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") From f726006ba5c3eebfcdd1858041b4171442e204f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:49 -0500 Subject: [PATCH 0267/1789] New translations python.mdx (Urdu (Pakistan)) --- website/src/pages/ur/subgraphs/querying/python.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ur/subgraphs/querying/python.mdx b/website/src/pages/ur/subgraphs/querying/python.mdx index b5abcce57b6d..2f9e2327b65e 100644 --- a/website/src/pages/ur/subgraphs/querying/python.mdx +++ b/website/src/pages/ur/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. @@ -17,14 +17,14 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
```python from subgrounds import Subgrounds sg = Subgrounds() -# Load the subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") From f3b5d8fdd28a93e8708652fc0f4a091db9ceb9a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:50 -0500 Subject: [PATCH 0268/1789] New translations python.mdx (Vietnamese) --- website/src/pages/vi/subgraphs/querying/python.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/vi/subgraphs/querying/python.mdx b/website/src/pages/vi/subgraphs/querying/python.mdx index 0937e4f7862d..ed0d078a4175 100644 --- a/website/src/pages/vi/subgraphs/querying/python.mdx +++ b/website/src/pages/vi/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. @@ -17,14 +17,14 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
```python from subgrounds import Subgrounds sg = Subgrounds() -# Load the subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") From b8120f453ecb1c6dbbf21f8fbd5e14ca81a3a512 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:51 -0500 Subject: [PATCH 0269/1789] New translations python.mdx (Marathi) --- website/src/pages/mr/subgraphs/querying/python.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/mr/subgraphs/querying/python.mdx b/website/src/pages/mr/subgraphs/querying/python.mdx index 020814827402..bfeabae0b868 100644 --- a/website/src/pages/mr/subgraphs/querying/python.mdx +++ b/website/src/pages/mr/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. @@ -17,14 +17,14 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
```python from subgrounds import Subgrounds sg = Subgrounds() -# Load the subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") From fd311260059a13790648117da52a7fee2a238174 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:52 -0500 Subject: [PATCH 0270/1789] New translations python.mdx (Hindi) --- website/src/pages/hi/subgraphs/querying/python.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/hi/subgraphs/querying/python.mdx b/website/src/pages/hi/subgraphs/querying/python.mdx index 22e9b71da321..53eeeab34bb4 100644 --- a/website/src/pages/hi/subgraphs/querying/python.mdx +++ b/website/src/pages/hi/subgraphs/querying/python.mdx @@ -3,7 +3,7 @@ title: Query The Graph with Python and Subgrounds sidebarTitle: Python (Subgrounds) --- -Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. @@ -17,14 +17,14 @@ pip install --upgrade subgrounds python -m pip install --upgrade subgrounds ``` -Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
```python from subgrounds import Subgrounds sg = Subgrounds() -# Load the subgraph +# Load the Subgraph aave_v2 = sg.load_subgraph( "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") From f29c434919651a3eed52dfac0abe1ef36e38ebf5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:53 -0500 Subject: [PATCH 0271/1789] New translations python.mdx (Swahili) --- .../pages/sw/subgraphs/querying/python.mdx | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/querying/python.mdx diff --git a/website/src/pages/sw/subgraphs/querying/python.mdx b/website/src/pages/sw/subgraphs/querying/python.mdx new file mode 100644 index 000000000000..ed0d078a4175 --- /dev/null +++ b/website/src/pages/sw/subgraphs/querying/python.mdx @@ -0,0 +1,57 @@ +--- +title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) +--- + +Subgrounds is an intuitive Python library for querying Subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect Subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## Getting Started + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. The following example grabs a Subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). + +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the Subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seamlessly save your data as CSVs for further analysis. 
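Editor's note: the newly created Swahili python.mdx above states that Subgrounds "automates tedious workflows such as pagination". Assuming that documented behavior, the minimal sketch below simply raises `first` beyond what a single GraphQL request would normally return (requests are typically capped around 1,000 entities per page) and lets Subgrounds issue the paginated requests itself. It uses only the calls confirmed by the documentation; the `first=2500` value is an illustrative assumption.

```python
from subgrounds import Subgrounds

sg = Subgrounds()

# Same Aave v2 Subgraph endpoint as in the documentation example
aave_v2 = sg.load_subgraph(
    "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum")

# Ask for more rows than one page can hold; per the docs, Subgrounds
# transparently splits this into multiple paginated requests.
all_markets = aave_v2.Query.markets(
    orderBy=aave_v2.Market.totalValueLockedUSD,
    orderDirection='desc',
    first=2500,  # larger than a single page, to exercise automatic pagination
)

df = sg.query_df([
    all_markets.name,
    all_markets.totalValueLockedUSD,
])
print(len(df), "rows fetched across multiple pages")
```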
From 1ac17d896e6b35de9c28c0e764f27f801ca79a23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:54 -0500 Subject: [PATCH 0272/1789] New translations arbitrum-faq.mdx (Romanian) --- website/src/pages/ro/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ro/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/ro/archived/arbitrum/arbitrum-faq.mdx index 562824e64e95..d121f5a2d0f3 100644 --- a/website/src/pages/ro/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/ro/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ By scaling The Graph on L2, network participants can now benefit from: - Security inherited from Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. @@ -39,7 +39,7 @@ To take advantage of using The Graph on L2, use this dropdown switcher to toggle ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) -## As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ All smart contracts have been thoroughly [audited](https://github.com/graphproto Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Are existing subgraphs on Ethereum working? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## Does GRT have a new smart contract deployed on Arbitrum? 
From 62e103cc1d8c426767d35cc3ed192044625e64fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:55 -0500 Subject: [PATCH 0273/1789] New translations arbitrum-faq.mdx (French) --- website/src/pages/fr/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/fr/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/fr/archived/arbitrum/arbitrum-faq.mdx index b2f6d7382c61..3aeb3de89d39 100644 --- a/website/src/pages/fr/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/fr/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ Grâce à la mise à l'échelle de The Graph sur la L2, les participants du rés - La sécurité héritée d'Ethereum -La mise à l'échelle des contrats intelligents du protocole sur la L2 permet aux participants du réseau d'interagir plus fréquemment pour un coût réduit en termes de frais de gaz. Par exemple, les Indexeurs peuvent ouvrir et fermer des allocations plus fréquemment pour indexer un plus grand nombre de subgraphs. Les développeurs peuvent déployer et mettre à jour des subgraphs plus facilement, et les Déléguateurs peuvent déléguer des GRT plus fréquemment. Les Curateurs peuvent ajouter ou supprimer des signaux dans un plus grand nombre de subgraphs - des actions auparavant considérées comme trop coûteuses pour être effectuées fréquemment en raison des frais de gaz. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. La communauté Graph a décidé d'avancer avec Arbitrum l'année dernière après le résultat de la discussion [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305). @@ -39,7 +39,7 @@ Pour tirer parti de l'utilisation de The Graph sur L2, utilisez ce sélecteur d [Sélecteur déroulant pour activer Arbitrum](/img/arbitrum-screenshot-toggle.png) -## En tant que développeur de subgraphs, consommateur de données, indexeur, curateur ou délégateur, que dois-je faire maintenant ? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ Tous les contrats intelligents ont été soigneusement [vérifiés](https://gith Tout a été testé minutieusement et un plan d’urgence est en place pour assurer une transition sûre et fluide. Les détails peuvent être trouvés [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and- considérations de sécurité-20). -## Les subgraphs existants sur Ethereum fonctionnent  t-ils? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. 
Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## GRT a-t-il un nouveau contrat intelligent déployé sur Arbitrum ? From 37586e32b7dbed48c6cf1fcda0b876ac4be590a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:56 -0500 Subject: [PATCH 0274/1789] New translations arbitrum-faq.mdx (Spanish) --- website/src/pages/es/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/es/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/es/archived/arbitrum/arbitrum-faq.mdx index 85ad70c11ca2..2b7fe7284fc8 100644 --- a/website/src/pages/es/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/es/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ By scaling The Graph on L2, network participants can now benefit from: - Security inherited from Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. @@ -39,7 +39,7 @@ Para aprovechar el uso de The Graph en L2, usa este conmutador desplegable para ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) -## Como developer de subgrafos, consumidor de datos, Indexador, Curador o Delegador, ¿qué debo hacer ahora? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ All smart contracts have been thoroughly [audited](https://github.com/graphproto Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Are existing subgraphs on Ethereum working? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. 
## Does GRT have a new smart contract deployed on Arbitrum? From 4dae89be28beb81de5044b5f183a295dc4009860 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:58 -0500 Subject: [PATCH 0275/1789] New translations arbitrum-faq.mdx (Arabic) --- website/src/pages/ar/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ar/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/ar/archived/arbitrum/arbitrum-faq.mdx index 898175b05cad..e1dbbea03383 100644 --- a/website/src/pages/ar/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/ar/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ By scaling The Graph on L2, network participants can now benefit from: - Security inherited from Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. @@ -39,7 +39,7 @@ To take advantage of using The Graph on L2, use this dropdown switcher to toggle ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) -## As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ All smart contracts have been thoroughly [audited](https://github.com/graphproto Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Are existing subgraphs on Ethereum working? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## Does GRT have a new smart contract deployed on Arbitrum? 
From 58beaea01b0a2251bf85603af8b4bc0fdb421dcf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:59 -0500 Subject: [PATCH 0276/1789] New translations arbitrum-faq.mdx (Czech) --- website/src/pages/cs/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/cs/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/cs/archived/arbitrum/arbitrum-faq.mdx index 050d1a0641aa..df47adfff704 100644 --- a/website/src/pages/cs/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/cs/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ By scaling The Graph on L2, network participants can now benefit from: - Zabezpečení zděděné po Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. Komunita Graf se v loňském roce rozhodla pokračovat v Arbitrum po výsledku diskuze [GIP-0031] (https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305). @@ -39,7 +39,7 @@ Pro využití výhod používání a Graf na L2 použijte rozevírací přepína ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) -## Jako vývojář podgrafů, Spotřebitel dat, indexer, kurátor, nebo delegátor, co mám nyní udělat? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ Všechny chytré smlouvy byly důkladně [auditovány](https://github.com/graphp Vše bylo důkladně otestováno, a je připraven pohotovostní plán, který zajistí bezpečný a bezproblémový přechod. Podrobnosti naleznete [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Are existing subgraphs on Ethereum working? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## Does GRT have a new smart contract deployed on Arbitrum? 
From e42e7c89288d28c169b07afdf6e7c59f5d018e36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:12:59 -0500 Subject: [PATCH 0277/1789] New translations arbitrum-faq.mdx (German) --- website/src/pages/de/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/de/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/de/archived/arbitrum/arbitrum-faq.mdx index 54809f94fd9c..68424dbd3e04 100644 --- a/website/src/pages/de/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/de/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ Durch die Skalierung von The Graph auf L2 können die Netzwerkteilnehmer nun von - Von Ethereum übernommene Sicherheit -Die Skalierung der Smart Contracts des Protokolls auf L2 ermöglicht den Netzwerkteilnehmern eine häufigere Interaktion zu geringeren Kosten in Form von Gasgebühren. So können Indexer beispielsweise häufiger Zuweisungen öffnen und schließen, um eine größere Anzahl von Subgraphen zu indexieren. Entwickler können Subgraphen leichter bereitstellen und aktualisieren, und Delegatoren können GRT häufiger delegieren. Kuratoren können einer größeren Anzahl von Subgraphen Signale hinzufügen oder entfernen - Aktionen, die bisher aufgrund der Kosten zu kostspielig waren, um sie häufig durchzuführen. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. Die The Graph-Community beschloss letztes Jahr nach dem Ergebnis der [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305)-Diskussion, mit Arbitrum weiterzumachen. @@ -39,7 +39,7 @@ Um die Vorteile von The Graph auf L2 zu nutzen, verwenden Sie diesen Dropdown-Sc ![Dropdown-Schalter zum Aktivieren von Arbitrum](/img/arbitrum-screenshot-toggle.png) -## Was muss ich als Entwickler von Subgraphen, Datenkonsument, Indexer, Kurator oder Delegator jetzt tun? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Netzwerk-Teilnehmer müssen zu Arbitrum wechseln, um weiterhin am The Graph Network teilnehmen zu können. Weitere Unterstützung finden Sie im [Leitfaden zum L2 Transfer Tool](/archived/arbitrum/l2-transfer-tools-guide/). @@ -51,9 +51,9 @@ Alle Smart Contracts wurden gründlich [audited] (https://github.com/graphprotoc Alles wurde gründlich getestet, und es gibt einen Notfallplan, um einen sicheren und nahtlosen Übergang zu gewährleisten. Einzelheiten finden Sie [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Funktionieren die vorhandenen Subgraphen auf Ethereum? +## Are existing Subgraphs on Ethereum working? -Alle Subgraphen sind jetzt auf Arbitrum. Bitte lesen Sie den [Leitfaden zum L2 Transfer Tool](/archived/arbitrum/l2-transfer-tools-guide/), um sicherzustellen, dass Ihre Subgraphen reibungslos funktionieren. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. 
## Verfügt GRT über einen neuen Smart Contract, der auf Arbitrum eingesetzt wird? From b02426b514c98c2b5cdb250c65d1f970999ca48f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:01 -0500 Subject: [PATCH 0278/1789] New translations arbitrum-faq.mdx (Italian) --- website/src/pages/it/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/it/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/it/archived/arbitrum/arbitrum-faq.mdx index 4b6ef7df03fc..5c4dc7fa3aa3 100644 --- a/website/src/pages/it/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/it/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ By scaling The Graph on L2, network participants can now benefit from: - Sicurezza ereditata da Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. La comunità di The Graph ha deciso di procedere con Arbitrum l'anno scorso dopo l'esito della discussione [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305). @@ -39,7 +39,7 @@ Per sfruttare l'utilizzo di The Graph su L2, utilizza il selettore a discesa per ![Selettore a discesa per cambiare a Arbitrum](/img/arbitrum-screenshot-toggle.png) -## In quanto sviluppatore di subgraph, consumatore di dati, Indexer, Curator o Delegator, cosa devo fare ora? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ All smart contracts have been thoroughly [audited](https://github.com/graphproto Tutto è stato testato accuratamente e un piano di contingenza è in atto per garantire una transizione sicura e senza intoppi. I dettagli possono essere trovati [qui](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Are existing subgraphs on Ethereum working? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## Does GRT have a new smart contract deployed on Arbitrum? 
From c9e0bece652415a31b2a291db035ab0424b186e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:02 -0500 Subject: [PATCH 0279/1789] New translations arbitrum-faq.mdx (Japanese) --- website/src/pages/ja/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ja/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/ja/archived/arbitrum/arbitrum-faq.mdx index 3ab2bdbbf83b..cc0c098f0af1 100644 --- a/website/src/pages/ja/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/ja/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ By scaling The Graph on L2, network participants can now benefit from: - イーサリアムから継承したセキュリティ -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. Graph コミュニティは、[GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) の議論の結果を受けて、昨年 Arbitrum を進めることを決定しました。 @@ -39,7 +39,7 @@ L2でのThe Graphの活用には、このドロップダウンスイッチャー ![Arbitrum を切り替えるドロップダウン スイッチャー](/img/arbitrum-screenshot-toggle.png) -## サブグラフ開発者、データ消費者、インデクサー、キュレーター、デリゲーターは何をする必要がありますか? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ All smart contracts have been thoroughly [audited](https://github.com/graphproto すべてが徹底的にテストされており、安全かつシームレスな移行を保証するための緊急時対応計画が整備されています。詳細は[here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20)をご覧ください。 -## Are existing subgraphs on Ethereum working? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## Does GRT have a new smart contract deployed on Arbitrum? 
From bc807833833ca5f51021762cdf6c6f904c0304d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:03 -0500 Subject: [PATCH 0280/1789] New translations arbitrum-faq.mdx (Korean) --- website/src/pages/ko/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ko/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/ko/archived/arbitrum/arbitrum-faq.mdx index 562824e64e95..d121f5a2d0f3 100644 --- a/website/src/pages/ko/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/ko/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ By scaling The Graph on L2, network participants can now benefit from: - Security inherited from Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. @@ -39,7 +39,7 @@ To take advantage of using The Graph on L2, use this dropdown switcher to toggle ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) -## As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ All smart contracts have been thoroughly [audited](https://github.com/graphproto Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Are existing subgraphs on Ethereum working? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## Does GRT have a new smart contract deployed on Arbitrum? 
From a3ac5bc142372f3107582323bbcfef76de1457b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:03 -0500 Subject: [PATCH 0281/1789] New translations arbitrum-faq.mdx (Dutch) --- website/src/pages/nl/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/nl/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/nl/archived/arbitrum/arbitrum-faq.mdx index ee8b300ccb87..0e19e7062073 100644 --- a/website/src/pages/nl/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/nl/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ By scaling The Graph on L2, network participants can now benefit from: - Veiligheid overgenomen van Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. The Graph gemeenschap heeft vorig jaar besloten om door te gaan met Arbitrum na de uitkomst van [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussie. @@ -39,7 +39,7 @@ Om gebruik te maken van The Graph op L2, gebruik deze keuzeschakelaar om te wiss ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) -## Als een subgraph ontwikkelaar, data consument, Indexer, Curator, of Delegator, wat moet ik nu doen? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ All smart contracts have been thoroughly [audited](https://github.com/graphproto Alles is grondig getest, en een eventualiteiten plan is gemaakt en klaargezet voor een veilige en naadloze transitie. Details kunnen [hier](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20) gevonden worden. -## Are existing subgraphs on Ethereum working? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## Does GRT have a new smart contract deployed on Arbitrum? 
From d9a8a4fcac2d01edff3385147b296523359081ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:04 -0500 Subject: [PATCH 0282/1789] New translations arbitrum-faq.mdx (Polish) --- website/src/pages/pl/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/pl/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/pl/archived/arbitrum/arbitrum-faq.mdx index 8e3f51fe99c9..8322010a2d88 100644 --- a/website/src/pages/pl/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/pl/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ By scaling The Graph on L2, network participants can now benefit from: - Bezpieczeństwo jako spuścizna sieci Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. W zeszłym roku społeczność The Graph postanowiła pójść o krok do przodu z Arbitrum po wynikach dyskusji [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305). @@ -39,7 +39,7 @@ By w pełni wykorzystać wszystkie zalety używania protokołu The Graph na L2 w ![Przejście do listy zawierającej Arbitrum](/img/arbitrum-screenshot-toggle.png) -## Co powinien wiedzieć na ten temat subgraf developer, konsument danych, indekser, kurator lub delegator? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ All smart contracts have been thoroughly [audited](https://github.com/graphproto Wszystko zostało dokładnie przetestowane i przygotowano plan awaryjny, aby zapewnić bezpieczne i płynne przeniesienie. Szczegóły można znaleźć [tutaj](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Are existing subgraphs on Ethereum working? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## Does GRT have a new smart contract deployed on Arbitrum? 
From f2209f2c112982b96d317b6ca5d6db63db810fac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:05 -0500 Subject: [PATCH 0283/1789] New translations arbitrum-faq.mdx (Portuguese) --- website/src/pages/pt/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/pt/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/pt/archived/arbitrum/arbitrum-faq.mdx index 0c1ba5b192ef..7932ad2508bd 100644 --- a/website/src/pages/pt/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/pt/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ By scaling The Graph on L2, network participants can now benefit from: - Herdar segurança do Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. A comunidade do The Graph prosseguiu com o Arbitrum no ano passado, após o resultado da discussão [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305). @@ -39,7 +39,7 @@ Para aproveitar o The Graph na L2, use este switcher de dropdown para alternar e ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) -## Como um programador, consumidor de dados, Indexador, Curador ou Delegante, o que devo fazer agora? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ Todos os contratos inteligentes já foram devidamente [auditados](https://github Tudo foi testado exaustivamente, e já está pronto um plano de contingência para garantir uma transição segura e suave. Mais detalhes [aqui](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Are existing subgraphs on Ethereum working? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## Does GRT have a new smart contract deployed on Arbitrum? 
From 4820ffd718728273a3f80d09d799048d4514cd58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:06 -0500 Subject: [PATCH 0284/1789] New translations arbitrum-faq.mdx (Russian) --- website/src/pages/ru/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ru/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/ru/archived/arbitrum/arbitrum-faq.mdx index 0375e85a7135..5e7bf098577d 100644 --- a/website/src/pages/ru/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/ru/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ title: Часто задаваемые вопросы об Arbitrum - Безопасность, унаследованную от Ethereum -Масштабирование смарт-контрактов протокола на L2 позволяет участникам сети взаимодействовать чаще и с меньшими затратами на комиссии за газ. Например, Индексаторы могут чаще открывать и закрывать аллокации, чтобы индексировать большее количество субграфов. Разработчики могут с большей легкостью разворачивать и обновлять субграфы, а Делегаторы — чаще делегировать GRT. Кураторы могут добавлять или удалять сигнал для большего количества субграфов — действия, которые ранее считались слишком затратными для частого выполнения из-за стоимости газа. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. Решение о продолжении сотрудничества с Arbitrum было принято в прошлом году по итогам обсуждения сообществом The Graph [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305). @@ -39,7 +39,7 @@ title: Часто задаваемые вопросы об Arbitrum ![Выпадающий список для переключения на Arbitrum](/img/arbitrum-screenshot-toggle.png) -## Что мне нужно делать сейчас как разработчику субграфа, потребителю данных, индексатору, куратору или делегатору? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ Network participants must move to Arbitrum to continue participating in The Grap Все было тщательно протестировано, и разработан план действий на случай непредвиденных обстоятельств, чтобы обеспечить безопасный и непрерывный переход. Подробности можно найти [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Работают ли существующие субграфы на Ethereum? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## Есть ли у GRT новый смарт-контракт, развернутый на Arbitrum? 
From bab0a84ca7ee296d201f1bc8a77aaaf30e79ed92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:07 -0500 Subject: [PATCH 0285/1789] New translations arbitrum-faq.mdx (Swedish) --- website/src/pages/sv/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/sv/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/sv/archived/arbitrum/arbitrum-faq.mdx index a3162cf19888..aba7e13387a4 100644 --- a/website/src/pages/sv/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/sv/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ By scaling The Graph on L2, network participants can now benefit from: - Säkerhet ärvt från Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. Graph gemenskapen beslutade att gå vidare med Arbitrum förra året efter resultatet av diskussionen [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305). @@ -39,7 +39,7 @@ För att dra fördel av att använda The Graph på L2, använd den här rullgard ![Dropdown-väljare för att växla Arbitrum](/img/arbitrum-screenshot-toggle.png) -## Som subgrafutvecklare, datakonsument, indexerare, curator eller delegator, vad behöver jag göra nu? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ All smart contracts have been thoroughly [audited](https://github.com/graphproto Allt har testats noggrant och en beredskapsplan finns på plats för att säkerställa en säker och sömlös övergång. Detaljer finns [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Are existing subgraphs on Ethereum working? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## Does GRT have a new smart contract deployed on Arbitrum? 
From 0cfec1a7ad4e3eb53b0e5f36ec791e841e403386 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:08 -0500 Subject: [PATCH 0286/1789] New translations arbitrum-faq.mdx (Turkish) --- website/src/pages/tr/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/tr/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/tr/archived/arbitrum/arbitrum-faq.mdx index ca32d52975dc..eeb1e61127b5 100644 --- a/website/src/pages/tr/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/tr/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ Ağ katılımcıları, The Graph'i L2 üzerinde ölçeklendirerek şunlardan fay - Ethereum'dan aktarılmış güvenlik -Protokol akıllı sözleşmelerini L2’ye ölçeklendirmek, ağ katılımcılarının daha düşük gas ücretleriyle daha sık etkileşimde bulunmasına olanak tanır. Örneğin, Endeksleyiciler daha fazla subgraph endekslemek için tahsisleri daha sık açıp kapatabilir. Geliştiriciler, subgraph’leri daha kolay bir şekilde dağıtabilir ve güncelleyebilir. Delegatörler, GRT’yi daha sık bir şekilde delege edebilir. Küratörler, daha fazla sayıda subgraph’e sinyal ekleyebilir veya kaldırabilir. Böylece önceden gas maliyetleri nedeniyle sık yapılması ekonomik olmayan işlemler artık mümkün hale gelir. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. Graph topluluğu, geçen yıl [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) tartışmasının sonucuna göre Arbitrum ile çalışmaya karar verdi. @@ -39,7 +39,7 @@ Graph'ı Katman2'de kullanmanın avantajlarından yararlanmak için, zincirler a ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) -## Bir subgraph geliştirici, veri tüketicisi, Endeksleyici, Küratör veya Delegatör olarak şimdi ne yapmalıyım? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ Tüm akıllı sözleşmeler kapsamlı bir şekilde [denetlenmiştir](https://git Güvenli ve sorunsuz bir geçiş sağlamak için her şey kapsamlı bir şekilde test edilmiş ve bir acil durum planı hazırlanmıştır. Ayrıntıları [burada](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20) bulabilirsiniz. -## Ethereum üzerindeki mevcut subgraph'ler çalışıyor mu? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## GRT'nin Arbitrum'da yeni bir akıllı sözleşmesi mi var? 
From 298830e07015a8e57dba95b2c1421ceaf8e3ea7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:09 -0500 Subject: [PATCH 0287/1789] New translations arbitrum-faq.mdx (Ukrainian) --- website/src/pages/uk/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/uk/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/uk/archived/arbitrum/arbitrum-faq.mdx index 28f6a3faeee6..8e6d1bd8d962 100644 --- a/website/src/pages/uk/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/uk/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ By scaling The Graph on L2, network participants can now benefit from: - Security inherited from Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. @@ -39,7 +39,7 @@ Once you have GRT on Arbitrum, you can add it to your billing balance. ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) -## Якщо я розробник підграфів, споживач даних, Індексатор, Куратор або Делегат, що мені потрібно робити зараз? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ All smart contracts have been thoroughly [audited](https://github.com/graphproto Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Are existing subgraphs on Ethereum working? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## Does GRT have a new smart contract deployed on Arbitrum? 
From f1d63d8636952ce9be0878e91a74a47cb1b277a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:10 -0500 Subject: [PATCH 0288/1789] New translations arbitrum-faq.mdx (Chinese Simplified) --- website/src/pages/zh/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/zh/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/zh/archived/arbitrum/arbitrum-faq.mdx index cc912a21a269..7870ec6b9150 100644 --- a/website/src/pages/zh/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/zh/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ By scaling The Graph on L2, network participants can now benefit from: - 从以太坊继承的安全性 -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. 去年,Graph社区在[GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) 讨论的结果之后,决定推进Arbitrum。 @@ -39,7 +39,7 @@ Once you have GRT on Arbitrum, you can add it to your billing balance. ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) -## 作为子图开发人员、数据消费者、索引人、策展人或授权者,我现在需要做什么? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ All smart contracts have been thoroughly [audited](https://github.com/graphproto 所有事项已经经过了彻底测试,并制定了应急计划,以确保安全和无缝过渡。详细信息可以在 [这里] (https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20)找到。 -## Are existing subgraphs on Ethereum working? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## Does GRT have a new smart contract deployed on Arbitrum? 
From 2b723da4e8d0300fe286cbf4372c1d6a04031b95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:12 -0500 Subject: [PATCH 0289/1789] New translations arbitrum-faq.mdx (Urdu (Pakistan)) --- website/src/pages/ur/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ur/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/ur/archived/arbitrum/arbitrum-faq.mdx index c51d33e4e16c..1483cf6b2a4e 100644 --- a/website/src/pages/ur/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/ur/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ By scaling The Graph on L2, network participants can now benefit from: - سیکیورٹی ایتھیریم سے وراثت میں ملی -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. گراف کمیونٹی نے [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) بحث کے نتائج کے بعد گزشتہ سال Arbitrum کے ساتھ آگے بڑھنے کا فیصلہ کیا۔ @@ -39,7 +39,7 @@ L2 پر گراف استعمال کرنے کا فائدہ اٹھانے کے لی ![Arbitrum کو ٹوگل کرنے کے لیے ڈراپ ڈاؤن سویچر](/img/arbitrum-screenshot-toggle.png) -## بطور سب گراف ڈویلپر، ڈیٹا کنزیومر، انڈیکسر، کیوریٹر، یا ڈیلیگیٹر، مجھے اب کیا کرنے کی ضرورت ہے؟ +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ All smart contracts have been thoroughly [audited](https://github.com/graphproto ہر چیز کی اچھی طرح جانچ کی گئی ہے، اور ایک محفوظ اور ہموار منتقلی کو یقینی بنانے کے لیے ایک ہنگامی منصوبہ تیار کیا گیا ہے۔ تفصیلات دیکھی جا سکتی ہیں [یہاں](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Are existing subgraphs on Ethereum working? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## Does GRT have a new smart contract deployed on Arbitrum? 
From 5ccf36aa1570df08e04d57b11329c84722d6cf7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:13 -0500 Subject: [PATCH 0290/1789] New translations arbitrum-faq.mdx (Vietnamese) --- website/src/pages/vi/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/vi/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/vi/archived/arbitrum/arbitrum-faq.mdx index 562824e64e95..d121f5a2d0f3 100644 --- a/website/src/pages/vi/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/vi/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ By scaling The Graph on L2, network participants can now benefit from: - Security inherited from Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. @@ -39,7 +39,7 @@ To take advantage of using The Graph on L2, use this dropdown switcher to toggle ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) -## As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ All smart contracts have been thoroughly [audited](https://github.com/graphproto Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Are existing subgraphs on Ethereum working? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## Does GRT have a new smart contract deployed on Arbitrum? 
From 65e5767f679917140908471f4872cd990f14e112 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:14 -0500 Subject: [PATCH 0291/1789] New translations arbitrum-faq.mdx (Marathi) --- website/src/pages/mr/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/mr/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/mr/archived/arbitrum/arbitrum-faq.mdx index 562824e64e95..d121f5a2d0f3 100644 --- a/website/src/pages/mr/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/mr/archived/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ By scaling The Graph on L2, network participants can now benefit from: - Security inherited from Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. @@ -39,7 +39,7 @@ To take advantage of using The Graph on L2, use this dropdown switcher to toggle ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) -## As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -51,9 +51,9 @@ All smart contracts have been thoroughly [audited](https://github.com/graphproto Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Are existing subgraphs on Ethereum working? +## Are existing Subgraphs on Ethereum working? -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## Does GRT have a new smart contract deployed on Arbitrum? 
From b6db5a844db2097f7261079a992fbcedd7b100db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:15 -0500 Subject: [PATCH 0292/1789] New translations arbitrum-faq.mdx (Hindi) --- .../pages/hi/archived/arbitrum/arbitrum-faq.mdx | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/website/src/pages/hi/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/hi/archived/arbitrum/arbitrum-faq.mdx index 35afafb65cd3..e6ee94187b38 100644 --- a/website/src/pages/hi/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/hi/archived/arbitrum/arbitrum-faq.mdx @@ -6,7 +6,7 @@ Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitru ## The Graph ने L2 समाधान को लागू करने का कारण क्या था? -L2 पर The Graph को स्केल करके, नेटवर्क के प्रतिभागी अब निम्नलिखित लाभ उठा सकते हैं: + L2 पर The Graph को स्केल करके, नेटवर्क के प्रतिभागी अब निम्नलिखित लाभ उठा सकते हैं: - Upwards of 26x savings on gas fees @@ -14,17 +14,17 @@ L2 पर The Graph को स्केल करके, नेटवर्क - Security inherited from Ethereum -L2 पर प्रोटोकॉल स्मार्ट कॉन्ट्रैक्ट्स को स्केल करने से नेटवर्क के प्रतिभागियों को गैस शुल्क में कमी के साथ अधिक बार इंटरैक्ट करने की अनुमति मिलती है। उदाहरण के लिए, Indexer अधिक बार आवंटन खोल और बंद कर सकते हैं ताकि अधिक सबग्राफ़ को इंडेक्स किया जा सके। डेवलपर्स सबग्राफ़ को अधिक आसानी से तैनात और अपडेट कर सकते हैं, और डेलीगेटर्स अधिक बार GRT को डेलीगेट कर सकते हैं। क्यूरेटर अधिक सबग्राफ़ में सिग्नल जोड़ या हटा सकते हैं—ऐसे कार्य जो पहले गैस की उच्च लागत के कारण अक्सर करना बहुत महंगा माना जाता था। +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. ## What do I need to do to use The Graph on L2? 
-The Graph का बिलिंग सिस्टम Arbitrum पर GRT को स्वीकार करता है, और उपयोगकर्ताओं को गैस के भुगतान के लिए Arbitrum पर ETH की आवश्यकता होगी। जबकि The Graph प्रोटोकॉल Ethereum Mainnet पर शुरू हुआ, सभी गतिविधियाँ, जिसमें बिलिंग कॉन्ट्रैक्ट्स भी शामिल हैं, अब Arbitrum One पर हैं। +The Graph का बिलिंग सिस्टम Arbitrum पर GRT को स्वीकार करता है, और उपयोगकर्ताओं को गैस के भुगतान के लिए Arbitrum पर ETH की आवश्यकता होगी। जबकि The Graph प्रोटोकॉल Ethereum Mainnet पर शुरू हुआ, सभी गतिविधियाँ, जिसमें बिलिंग कॉन्ट्रैक्ट्स भी शामिल हैं, अब Arbitrum One पर हैं। अत: क्वेरीज़ के लिए भुगतान करने के लिए, आपको Arbitrum पर GRT की आवश्यकता है। इसे प्राप्त करने के कुछ विभिन्न तरीके यहाँ दिए गए हैं: -- यदि आपके पास पहले से Ethereum पर GRT है, तो आप इसे Arbitrum पर ब्रिज कर सकते हैं। आप यह Subgraph Studio में प्रदान किए गए GRT ब्रिजिंग विकल्प के माध्यम से या निम्नलिखित में से किसी एक ब्रिज का उपयोग करके कर सकते हैं: +- यदि आपके पास पहले से Ethereum पर GRT है, तो आप इसे Arbitrum पर ब्रिज कर सकते हैं। आप यह Subgraph Studio में प्रदान किए गए GRT ब्रिजिंग विकल्प के माध्यम से या निम्नलिखित में से किसी एक ब्रिज का उपयोग करके कर सकते हैं: - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) - [TransferTo](https://transferto.xyz/swap) @@ -39,7 +39,7 @@ To take advantage of using The Graph on L2, use this dropdown switcher to toggle ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) -## Subgraph developer, data consumer, Indexer, Curator, or Delegator, के रूप में, मुझे अब क्या करने की आवश्यकता है? +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? The Graph Network में भाग लेने के लिए नेटवर्क प्रतिभागियों को Arbitrum पर स्थानांतरित होना आवश्यक है। अतिरिक्त सहायता के लिए कृपया [L2 Transfer Tool मार्गदर्शक](/archived/arbitrum/l2-transfer-tools-guide/) देखें। @@ -47,13 +47,14 @@ The Graph Network में भाग लेने के लिए नेटव ## क्या नेटवर्क को L2 पर स्केल करने से संबंधित कोई जोखिम थे? -सभी स्मार्ट कॉन्ट्रैक्ट्स का पूरी तरह से परीक्षित किया गया है। (https://github.com/graphprotocol/contracts/blob/main/packages/contracts/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). +सभी स्मार्ट कॉन्ट्रैक्ट्स का पूरी तरह से परीक्षित किया गया है। +(https://github.com/graphprotocol/contracts/blob/main/packages/contracts/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). हर चीज़ का पूरी तरह से परीक्षण किया गया है, और एक सुरक्षित और निर्बाध संक्रमण सुनिश्चित करने के लिए एक आकस्मिक योजना बनाई गई है। विवरण यहां पाया जा सकता है [here] (https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and- सुरक्षा-विचार-20). -## क्या Ethereum पर मौजूद सबग्राफ़ काम कर रहे हैं? +## Are existing Subgraphs on Ethereum working? -सभी सबग्राफ अब Arbitrum पर हैं। कृपया [ L2 Transfer Tool मार्गदर्शक](/archived/arbitrum/l2-transfer-tools-guide/) का संदर्भ लें ताकि आपके सबग्राफ बिना किसी समस्या के कार्य करें। +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. ## क्या GRT का एक नया स्मार्ट कॉन्ट्रैक्ट Arbitrum पर तैनात किया गया है? 
From f82a3481ec0c37cde04934436ba3190d6e7d151b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:16 -0500 Subject: [PATCH 0293/1789] New translations arbitrum-faq.mdx (Swahili) --- .../sw/archived/arbitrum/arbitrum-faq.mdx | 80 +++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 website/src/pages/sw/archived/arbitrum/arbitrum-faq.mdx diff --git a/website/src/pages/sw/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/sw/archived/arbitrum/arbitrum-faq.mdx new file mode 100644 index 000000000000..d121f5a2d0f3 --- /dev/null +++ b/website/src/pages/sw/archived/arbitrum/arbitrum-faq.mdx @@ -0,0 +1,80 @@ +--- +title: Arbitrum FAQ +--- + +Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitrum Billing FAQs. + +## Why did The Graph implement an L2 Solution? + +By scaling The Graph on L2, network participants can now benefit from: + +- Upwards of 26x savings on gas fees + +- Faster transaction speed + +- Security inherited from Ethereum + +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of Subgraphs. Developers can deploy and update Subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of Subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. + +The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. + +## What do I need to do to use The Graph on L2? + +The Graph’s billing system accepts GRT on Arbitrum, and users will need ETH on Arbitrum to pay their gas. While The Graph protocol started on Ethereum Mainnet, all activity, including the billing contracts, is now on Arbitrum One. + +Consequently, to pay for queries, you need GRT on Arbitrum. Here are a few different ways to achieve this: + +- If you already have GRT on Ethereum, you can bridge it to Arbitrum. You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges: + + - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) + - [TransferTo](https://transferto.xyz/swap) + +- If you have other assets on Arbitrum, you can swap them for GRT through a swapping protocol like Uniswap. + +- Alternatively, you can acquire GRT directly on Arbitrum through a decentralized exchange. + +Once you have GRT on Arbitrum, you can add it to your billing balance. + +To take advantage of using The Graph on L2, use this dropdown switcher to toggle between chains. + +![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) + +## As a Subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? + +Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. + +All indexing rewards are now entirely on Arbitrum. + +## Were there any risks associated with scaling the network to L2? + +All smart contracts have been thoroughly [audited](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). 
+ +Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). + +## Are existing Subgraphs on Ethereum working? + +All Subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your Subgraphs operate seamlessly. + +## Does GRT have a new smart contract deployed on Arbitrum? + +Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). However, the Ethereum mainnet [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) will remain operational. + +## Billing on Arbitrum FAQs + +## What do I need to do about the GRT in my billing balance? + +Nothing! Your GRT has been securely migrated to Arbitrum and is being used to pay for queries as you read this. + +## How do I know my funds have migrated securely to Arbitrum? + +All GRT billing balances have already been successfully migrated to Arbitrum. You can view the billing contract on Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). + +## How do I know the Arbitrum bridge is secure? + +The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. + +## What do I need to do if I'm adding fresh GRT from my Ethereum mainnet wallet? + +Adding GRT to your Arbitrum billing balance can be done with a one-click experience in [Subgraph Studio](https://thegraph.com/studio/). You'll be able to easily bridge your GRT to Arbitrum and fill your API keys in one transaction. + +Visit the [Billing page](/subgraphs/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. From 28213cea9dea6b906e06b76cbe65e710a3356790 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:17 -0500 Subject: [PATCH 0294/1789] New translations l2-transfer-tools-faq.mdx (Romanian) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/ro/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/ro/archived/arbitrum/l2-transfer-tools-faq.mdx index 612b61fd0515..7edde3d0cbcd 100644 --- a/website/src/pages/ro/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/ro/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ The exception is with smart contract wallets like multisigs: these are smart con The L2 Transfer Tools use Arbitrum’s native mechanism to send messages from L1 to L2. This mechanism is called a “retryable ticket” and is used by all native token bridges, including the Arbitrum GRT bridge. You can read more about retryable tickets in the [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -When you transfer your assets (subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. 
When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? @@ -36,43 +36,43 @@ If you have the L1 transaction hash (which you can find by looking at the recent ## Subgraph Transfer -### How do I transfer my subgraph? +### How do I transfer my Subgraph? -To transfer your subgraph, you will need to complete the following steps: +To transfer your Subgraph, you will need to complete the following steps: 1. Initiate the transfer on Ethereum mainnet 2. Wait 20 minutes for confirmation -3. Confirm subgraph transfer on Arbitrum\* +3. Confirm Subgraph transfer on Arbitrum\* -4. Finish publishing subgraph on Arbitrum +4. Finish publishing Subgraph on Arbitrum 5. Update Query URL (recommended) -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. 
In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### Where should I initiate my transfer from? -You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### How long do I need to wait until my subgraph is transferred +### How long do I need to wait until my Subgraph is transferred The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. -### Will my subgraph still be discoverable after I transfer it to L2? +### Will my Subgraph still be discoverable after I transfer it to L2? -Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### Does my subgraph need to be published to transfer it? +### Does my Subgraph need to be published to transfer it? -To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. 
If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### After I transfer, do I also need to re-publish on Arbitrum? @@ -80,21 +80,21 @@ After the 20 minute transfer window, you will need to confirm the transfer with ### Will my endpoint experience downtime while re-publishing? -It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? -Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### Will my subgraph's curation move with my subgraph? +### Will my Subgraph's curation move with my Subgraph? -If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### Can I move my subgraph back to Ethereum mainnet after I transfer? +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. 
However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### Why do I need bridged ETH to complete my transfer? @@ -206,19 +206,19 @@ To transfer your curation, you will need to complete the following steps: \*If necessary - i.e. you are using a contract address. -### How will I know if the subgraph I curated has moved to L2? +### How will I know if the Subgraph I curated has moved to L2? -When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### What if I do not wish to move my curation to L2? -When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### How do I know my curation successfully transferred? Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. -### Can I transfer my curation on more than one subgraph at a time? +### Can I transfer my curation on more than one Subgraph at a time? There is no bulk transfer option at this time. @@ -266,7 +266,7 @@ It will take approximately 20 minutes for the L2 transfer tool to complete trans ### Do I have to index on Arbitrum before I transfer my stake? -You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### Can Delegators move their delegation before I move my indexing stake? 
From 1ddaf351fa0af6846cb910c12e4bb50bf8f72f31 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:18 -0500 Subject: [PATCH 0295/1789] New translations l2-transfer-tools-faq.mdx (French) --- .../arbitrum/l2-transfer-tools-faq.mdx | 60 ++++++++++--------- 1 file changed, 31 insertions(+), 29 deletions(-) diff --git a/website/src/pages/fr/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/fr/archived/arbitrum/l2-transfer-tools-faq.mdx index d4edd391bed6..b445b410ec55 100644 --- a/website/src/pages/fr/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/fr/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ Une exception concerne les portefeuilles de smart contracts comme les multisigs Les outils de transfert L2 utilisent le mécanisme natif d’Arbitrum pour envoyer des messages de L1 à L2. Ce mécanisme s’appelle un « billet modifiable » et est utilisé par tous les ponts de jetons natifs, y compris le pont GRT Arbitrum. Vous pouvez en savoir plus sur les billets retryables dans le [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -Lorsque vous transférez vos actifs (subgraph, enjeu, délégation ou curation) vers L2, un message est envoyé par le pont GRT Arbitrum qui crée un ticket modifiable en L2. L’outil de transfert inclut une certaine valeur ETH dans la transaction, qui est utilisée pour 1) payer la création du ticket et 2) payer pour le gaz utile à l'exécution du ticket en L2. Cependant, comme le prix du gaz peut varier durant le temps nécessaire à l'exécution du ticket en L2, il est possible que cette tentative d’exécution automatique échoue. Lorsque cela se produit, le pont Arbitrum maintient le billet remboursable en vie pendant 7 jours, et tout le monde peut réessayer de « racheter » le billet (ce qui nécessite un portefeuille avec des ETH liés à Arbitrum). +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -C'est ce que nous appelons l'étape « Confirmer » dans tous les outils de transfert : elle s'exécute automatiquement dans la plupart des cas et l'exécution automatique réussit le plus souvent. Il est tout de même important de vérifier que le transfert se soit bien déroulé. Si cela échoue et qu'aucune autre tentative n'est confirmé dans les 7 jours, le pont Arbitrum rejettera le ticket et vos actifs (subgraph, participation, délégation ou curation) ne pourront pas être récupérés. Les développeurs principaux de Graph ont mis en place un système de surveillance pour détecter ces situations et essayer d'échanger les billets avant qu'il ne soit trop tard, mais il en reste de votre responsabilité de vous assurer que votre transfert est terminé à temps. 
Si vous rencontrez des difficultés pour confirmer votre transaction, veuillez nous contacter en utilisant [ce formulaire](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) et les développeurs seront là pour vous aider. +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### J'ai commencé le transfert de ma délégation/enjeu/curation et je ne suis pas sûr qu'il soit parvenu jusqu'à L2, comment puis-je confirmer qu'il a été transféré correctement ? @@ -36,43 +36,43 @@ Si vous disposez du hachage de transaction L1 (que vous pouvez trouver en consul ## Subgraph transfert -### Comment transférer mon subgraph ? +### How do I transfer my Subgraph? -Pour transférer votre subgraph, suivez les étapes qui suivent : +To transfer your Subgraph, you will need to complete the following steps: 1. Initier le transfert sur le mainnet Ethereum 2. Attendre 20 minutes pour une confirmation -3. Vérifier le transfert de subgraph sur Arbitrum\* +3. Confirm Subgraph transfer on Arbitrum\* -4. Terminer la publication du sous-graphe sur Arbitrum +4. Finish publishing Subgraph on Arbitrum 5. Mettre à jour l’URL de requête (recommandé) -\*Notez que vous devez confirmer le transfert dans un délai de 7 jours, faute de quoi votre subgraph pourrait être perdu. Dans la plupart des cas, cette étape s'exécutera automatiquement, mais une confirmation manuelle peut être nécessaire en cas de hausse du prix du gaz sur Arbitrum. En cas de problème au cours de ce processus, des ressources seront disponibles pour vous aider : contactez le service d'assistance à l'adresse support@thegraph.com ou sur [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### D’où dois-je initier mon transfert ? -Vous pouvez effectuer votre transfert à partir de la [Subgraph Studio] (https://thegraph.com/studio/), [Explorer,] (https://thegraph.com/explorer) ou de n’importe quelle page de détails de subgraph. Cliquez sur le bouton "Transférer le subgraph" dans la page de détails du subgraph pour démarrer le transfert. +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. 
-### Combien de temps dois-je attendre avant que mon subgraph soit transféré ? +### How long do I need to wait until my Subgraph is transferred Le temps de transfert prend environ 20 minutes. Le pont Arbitrum fonctionne en arrière-plan pour terminer automatiquement le transfert du pont. Dans certains cas, les coûts du gaz peuvent augmenter et vous devrez confirmer à nouveau la transaction. -### Mon subgraph sera-t-il toujours repérable après le transfert à L2? +### Will my Subgraph still be discoverable after I transfer it to L2? -Votre subgraph ne sera détectable que sur le réseau sur lequel il est publié. Par exemple, si votre subgraph est sur Arbitrum One, vous ne pouvez le trouver que dans Explorer sur Arbitrum One et vous ne pourrez pas le trouver sur Ethereum. Assurez-vous que vous avez Arbitrum One sélectionné dans le commutateur de réseau en haut de la page pour vous assurer que vous êtes sur le bon réseau.  Après le transfert, le subgraph L1 apparaîtra comme obsolète. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### Mon subgraph doit-il être publié afin d'être transférer ? +### Does my Subgraph need to be published to transfer it? -Pour profiter de l’outil de transfert de subgraph, votre subgraph doit déjà être publié sur Ethereum mainnet et doit avoir un signal de curation appartenant au portefeuille qui possède le subgraph. Si votre subgraph n’est pas publié, il est recommandé de publier simplement directement sur Arbitrum One - les frais de gaz associés seront considérablement moins élevés. Si vous souhaitez transférer un subgraph publié mais que le compte propriétaire n’a pas sélectionné de signal, vous pouvez signaler un petit montant (par ex. 1 GRT) à partir de ce compte; assurez-vous de choisir le signal de “migration automatique”. +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### Que se passe-t-il pour la version Ethereum mainnet de mon subgraph après que j'ai transféré sur Arbitrum ? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -Après avoir transféré votre subgraph vers Arbitrum, la version du réseau principal Ethereum deviendra obsolète. Nous vous recommandons de mettre à jour votre URL de requête dans les 48 heures. Cependant, il existe une période de grâce qui maintient le fonctionnement de votre URL mainnet afin que tout support dapp tiers puisse être mis à jour. +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. 
### Après le transfert, dois-je également republier sur Arbitrum ? @@ -80,21 +80,21 @@ Après la fenêtre de transfert de 20 minutes, vous devrez confirmer le transfer ### Mon point de terminaison subira-t-il un temps d'arrêt lors de la republication ? -Il est peu probable, mais possible, de subir un bref temps d'arrêt selon les indexeurs qui prennent en charge le subgraph sur L1 et s'ils continuent à l'indexer jusqu'à ce que le subgraph soit entièrement pris en charge sur L2. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### La publication et la gestion des versions sont-elles les mêmes sur L2 que sur le mainnet Ethereum Ethereum ? -Oui. Sélectionnez Arbitrum One comme réseau publié lors de la publication dans le Subgraph Studio. Dans le Studio, le dernier point de terminaison sera disponible et vous dirigera vers la dernière version mise à jour du subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### La curation de mon subgraph sera-t-elle déplacée avec mon subgraph? +### Will my Subgraph's curation move with my Subgraph? -Si vous avez choisi le signal de migration automatique, 100% de votre propre curation se déplacera avec votre subgraph vers Arbitrum One. Tout le signal de curation du subgraph sera converti en GTR au moment du transfert, et le GRT correspondant à votre signal de curation sera utilisé pour frapper le signal sur le subgraph L2. +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -D’autres conservateurs peuvent choisir de retirer leur fraction de GRT ou de la transférer à L2 pour créer un signal neuf sur le même subgraph. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### Puis-je déplacer mon subgraph vers le mainnet Ethereum après le transfert? +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -Une fois transféré, votre version mainnet Ethereum de ce subgraph deviendra obsolète. Si vous souhaitez revenir au mainnet, vous devrez redéployer et publier à nouveau sur le mainnet. Cependant, le transfert vers le mainnet Ethereumt est fortement déconseillé car les récompenses d’indexation seront distribuées entièrement sur Arbitrum One. +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### Pourquoi ai-je besoin d’un pont ETH pour finaliser mon transfert ? @@ -206,19 +206,19 @@ Pour transférer votre curation, vous devrez compléter les étapes suivantes : \*Si nécessaire, c'est-à-dire que vous utilisez une adresse contractuelle. -### Comment saurai-je si le subgraph que j'ai organisé a été déplacé vers L2 ? +### How will I know if the Subgraph I curated has moved to L2? 
-Lors de la visualisation de la page de détails du subgraph, une bannière vous informera que ce subgraph a été transféré. Vous pouvez suivre l'invite pour transférer votre curation. Vous pouvez également trouver ces informations sur la page de détails du subgraph de tout subgraph déplacé. +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### Que se passe-t-il si je ne souhaite pas déplacer ma curation en L2 ? -Lorsqu’un subgraph est déprécié, vous avez la possibilité de retirer votre signal. De même, si un subgraph est passé à L2, vous pouvez choisir de retirer votre signal dans Ethereum mainnet ou d’envoyer le signal à L2. +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### Comment puis-je savoir si ma curation a été transférée avec succès? Les détails du signal seront accessibles via Explorer environ 20 minutes après le lancement de l'outil de transfert L2. -### Puis-je transférer ma curation sur plus d’un subgraph à la fois? +### Can I transfer my curation on more than one Subgraph at a time? Il n’existe actuellement aucune option de transfert groupé. @@ -266,7 +266,7 @@ Il faudra environ 20 minutes à l'outil de transfert L2 pour achever le transfer ### Dois-je indexer sur Arbitrum avant de transférer ma mise ? -Vous pouvez effectivement transférer votre mise d’abord avant de mettre en place l’indexation, mais vous ne serez pas en mesure de réclamer des récompenses sur L2 jusqu’à ce que vous allouez à des sous-graphes sur L2, les indexer, et présenter des points d’intérêt. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### Les délégués peuvent-ils déplacer leur délégation avant que je ne déplace ma participation à l'indexation ? @@ -339,11 +339,13 @@ Si vous n’avez transféré aucun solde de contrat de vesting à L2 et que votr ### J’utilise mon contrat de vesting pour investir dans mainnet. Puis-je transférer ma participation à Arbitrum? -Oui, mais si votre contrat est toujours acquis, vous ne pouvez transférer la participation que pour qu’elle soit détenue par votre contrat d’acquisition L2. Vous devez d’abord initialiser ce contrat L2 en transférant un solde de GRT à l’aide de l’outil de transfert de contrat d’acquisition dans Explorer. Si votre contrat est entièrement acquis, vous pouvez transférer votre participation à n’importe quelle adresse en L2, mais vous devez le définir au préalable et déposer des GRT pour l’outil de transfert L2 pour payer le gaz L2. +Oui, mais si votre contrat est toujours acquis, vous ne pouvez transférer la participation que pour qu’elle soit détenue par votre contrat d’acquisition L2. Vous devez d’abord initialiser ce contrat L2 en transférant un solde de GRT à l’aide de l’outil de transfert de contrat d’acquisition dans Explorer. Si +votre contrat est entièrement acquis, vous pouvez transférer votre participation à n’importe quelle adresse en L2, mais vous devez le définir au préalable et déposer des GRT pour l’outil de transfert L2 pour payer le gaz L2. ### J’utilise mon contrat de vesting pour déléguer sur mainnet. 
Puis-je transférer mes délégations à Arbitrum? -Oui, mais si votre contrat est toujours acquis, vous ne pouvez transférer la participation que pour qu’elle soit détenue par votre contrat de vesting L2. Vous devez d’abord initialiser ce contrat L2 en transférant un solde de GRT à l’aide de l’outil de transfert de contrat de vesting dans Explorer. Si votre contrat est entièrement acquis, vous pouvez transférer votre participation à n’importe quelle adresse en L2, mais vous devez le définir au préalable et déposer des GRT pour l’outil de transfert L2 pour payer le gaz L2. +Oui, mais si votre contrat est toujours acquis, vous ne pouvez transférer la participation que pour qu’elle soit détenue par votre contrat de vesting L2. Vous devez d’abord initialiser ce contrat L2 en transférant un solde de GRT à l’aide de l’outil de transfert de contrat de vesting dans Explorer. Si +votre contrat est entièrement acquis, vous pouvez transférer votre participation à n’importe quelle adresse en L2, mais vous devez le définir au préalable et déposer des GRT pour l’outil de transfert L2 pour payer le gaz L2. ### Puis-je spécifier un bénéficiaire différent pour mon contrat de vesting sur L2? From 8b23e401ef672ec42c2d7bb3d5f10bd3af1ba17b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:19 -0500 Subject: [PATCH 0296/1789] New translations l2-transfer-tools-faq.mdx (Spanish) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/es/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/es/archived/arbitrum/l2-transfer-tools-faq.mdx index 4b5963a153d4..730aa861a37d 100644 --- a/website/src/pages/es/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/es/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ The exception is with smart contract wallets like multisigs: these are smart con Las Herramientas de Transferencia a L2 utilizan el mecanismo nativo de Arbitrum para enviar mensajes de L1 a L2. Este mecanismo se llama "ticket reintentable" y es utilizado por todos los puentes de tokens nativos, incluido el puente GRT de Arbitrum. Puedes obtener más información sobre los tickets reintentables en la [Documentación de Arbitrum](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -Cuando transfieres tus activos (subgrafo, stake, delegación o curación) a L2, se envía un mensaje a través del puente Arbitrum GRT que crea un ticket reintentable en L2. La herramienta de transferencia incluye un valor ETH en la transacción, que se utiliza para: 1) pagar la creación del ticket y 2) pagar por el gas para ejecutar el ticket en L2. Sin embargo, debido a que los precios del gas pueden variar durante el tiempo hasta que el ticket esté listo para ejecutarse en L2, es posible que este intento de autoejecución falle. Cuando eso sucede, el puente de Arbitrum mantendrá el ticket reintentable activo durante un máximo de 7 días, y cualquier persona puede intentar nuevamente "canjear" el ticket (lo que requiere una wallet con algo de ETH transferido a Arbitrum). +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. 
However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -Esto es lo que llamamos el paso de "Confirmar" en todas las herramientas de transferencia. En la mayoría de los casos, se ejecutará automáticamente, ya que la autoejecución suele ser exitosa, pero es importante que vuelvas a verificar para asegurarte de que se haya completado. Si no tiene éxito y no hay reintentos exitosos en 7 días, el puente de Arbitrum descartará el ticket, y tus activos (subgrafo, stake, delegación o curación) se perderán y no podrán recuperarse. Los core devs de The Graph tienen un sistema de monitoreo para detectar estas situaciones e intentar canjear los tickets antes de que sea demasiado tarde, pero en última instancia, es tu responsabilidad asegurarte de que tu transferencia se complete a tiempo. Si tienes problemas para confirmar tu transacción, por favor comunícate a través de [este formulario](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) y los core devs estarán allí para ayudarte. +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### Comencé la transferencia de mi delegación/stake/curación y no estoy seguro de si se completó en L2, ¿cómo puedo confirmar que se transfirió correctamente? @@ -36,43 +36,43 @@ Si tienes el hash de la transacción en L1 (que puedes encontrar revisando las t ## Transferencia de Subgrafo -### ¿Cómo transfiero mi subgrafo? +### How do I transfer my Subgraph? -Para transferir tu subgrafo, tendrás que completar los siguientes pasos: +To transfer your Subgraph, you will need to complete the following steps: 1. Inicia la transferencia en Ethereum mainnet 2. Espera 20 minutos para la confirmación -3. Confirma la transferencia del subgrafo en Arbitrum +3. Confirm Subgraph transfer on Arbitrum\* -4. Termina de publicar el subgrafo en Arbitrum +4. Finish publishing Subgraph on Arbitrum 5. Actualiza la URL de consulta (recomendado) -\*Ten en cuenta que debes confirmar la transferencia dentro de los 7 días, de lo contrario, es posible que se pierda tu subgrafo. En la mayoría de los casos, este paso se ejecutará automáticamente, pero puede ser necesaria una confirmación manual si hay un aumento en el precio del gas en Arbitrum. 
Si surgen problemas durante este proceso, habrá recursos disponibles para ayudarte: ponte en contacto con el soporte en support@thegraph.com o en [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### ¿Desde dónde debo iniciar mi transferencia? -Puedes iniciar la transferencia desde el [Subgraph Studio](https://thegraph.com/studio/), [Explorer](https://thegraph.com/explorer) o desde cualquier página de detalles del subgrafo. Haz clic en el botón "Transferir Subgrafo" en la página de detalles del subgrafo para iniciar la transferencia. +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### ¿Cuánto tiempo tengo que esperar hasta que se transfiera mi subgrafo? +### How long do I need to wait until my Subgraph is transferred El tiempo de transferencia demora aproximadamente 20 minutos. El puente de Arbitrum está trabajando en segundo plano para completar la transferencia automáticamente. En algunos casos, los costos de gas pueden aumentar y necesitarás confirmar la transacción nuevamente. -### ¿Mi subgrafo seguirá siendo accesible después de transferirlo a L2? +### Will my Subgraph still be discoverable after I transfer it to L2? -Tu subgrafo solo será accesible en la red donde esté publicado. Por ejemplo, si tu subgrafo está en Arbitrum One, solo podrás encontrarlo en el explorador de Arbitrum One y no podrás encontrarlo en Ethereum. Asegúrate de tener seleccionado Arbitrum One en el selector de redes en la parte superior de la página para asegurarte de estar en la red correcta. Después de la transferencia, el subgrafo en L1 aparecerá como obsoleto. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### ¿Es necesario publicar mi subgrafo para transferirlo? +### Does my Subgraph need to be published to transfer it? -Para aprovechar la herramienta de transferencia de subgrafos, tu subgrafo debe estar ya publicado en la red principal de Ethereum y debe tener alguna señal de curación propiedad de la wallet que posee el subgrafo. Si tu subgrafo no está publicado, se recomienda que lo publiques directamente en Arbitrum One, ya que las tarifas de gas asociadas serán considerablemente más bajas. Si deseas transferir un subgrafo ya publicado pero la cuenta del propietario no ha curado ninguna señal en él, puedes señalizar una pequeña cantidad (por ejemplo, 1 GRT) desde esa cuenta; asegúrate de elegir la opción de señal "auto-migración". +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. 
If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### ¿Qué ocurre con la versión de Ethereum mainnet de mi subgrafo después de transferirlo a Arbitrum? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -Tras transferir tu subgrafo a Arbitrum, la versión de Ethereum mainnet quedará obsoleta. Te recomendamos que actualices tu URL de consulta en un plazo de 48 horas. Sin embargo, existe un periodo de gracia que mantiene tu URL de mainnet en funcionamiento para que se pueda actualizar cualquier soporte de dapp de terceros. +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### Después de la transferencia, ¿también tengo que volver a publicar en Arbitrum? @@ -80,21 +80,21 @@ Una vez transcurridos los 20 minutos de la ventana de transferencia, tendrás qu ### ¿Experimentará mi endpoint una interrupción durante la republicación? -Es poco probable, pero es posible experimentar una breve interrupción dependiendo de qué Indexadores estén respaldando el subgrafo en L1 y si continúan indexándolo hasta que el subgrafo esté completamente respaldado en L2. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### ¿Es lo mismo publicar y versionar en L2 que en Ethereum mainnet? -Sí. Asegúrate de seleccionar Arbitrum One como tu red para publicar cuando publiques en Subgraph Studio. En el Studio, estará disponible el último endpoint que apunta a la última versión actualizada del subgrafo. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### ¿Se moverá la curación de mi subgrafo junto con mi subgrafo? +### Will my Subgraph's curation move with my Subgraph? -Si has elegido auto-migrar la señal, el 100% de tu curación propia se moverá con tu subgrafo a Arbitrum One. Toda la señal de curación del subgrafo se convertirá a GRT en el momento de la transferencia, y el GRT correspondiente a tu señal de curación se utilizará para mintear señal en el subgrafo L2. +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -Otros Curadores pueden elegir si retiran su fracción de GRT, o también la transfieren a L2 para mintear señal en el mismo subgrafo. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### ¿Puedo mover mi subgrafo de nuevo a Ethereum mainnet después de la transferencia? +### Can I move my Subgraph back to Ethereum mainnet after I transfer? 
-Una vez transferida, la versión en Ethereum mainnet de este subgrafo quedará obsoleta. Si deseas regresar a mainnet, deberás volver a deployar y publicar en mainnet. Sin embargo, se desaconseja firmemente volver a transferir a Ethereum mainnet, ya que las recompensas por indexación se distribuirán eventualmente por completo en Arbitrum One. +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### ¿Por qué necesito ETH bridgeado para completar mi transferencia? @@ -206,19 +206,19 @@ Para transferir tu curación, deberás completar los siguientes pasos: \*Si es necesario - i.e. si estás utilizando una dirección de contrato. -### ¿Cómo sabré si el subgrafo que he curado ha pasado a L2? +### How will I know if the Subgraph I curated has moved to L2? -Al ver la página de detalles del subgrafo, un banner te notificará que este subgrafo ha sido transferido. Puedes seguir la indicación para transferir tu curación. También puedes encontrar esta información en la página de detalles del subgrafo de cualquier subgrafo que se haya trasladado. +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### ¿Qué ocurre si no deseo trasladar mi curación a L2? -Cuando un subgrafo queda obsoleto, tienes la opción de retirar tu señal. De manera similar, si un subgrafo se ha trasladado a L2, puedes elegir retirar tu señal en Ethereum mainnet o enviar la señal a L2. +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### ¿Cómo sé si mi curación se ha transferido correctamente? Los detalles de la señal serán accesibles a través del Explorer aproximadamente 20 minutos después de iniciar la herramienta de transferencia a L2. -### ¿Puedo transferir mi curación en más de un subgrafo a la vez? +### Can I transfer my curation on more than one Subgraph at a time? En este momento no existe la opción de transferencia masiva. @@ -266,7 +266,7 @@ La herramienta de transferencia L2 tardará aproximadamente 20 minutos en comple ### ¿Tengo que indexar en Arbitrum antes de transferir mi stake? -En efecto, puedes transferir tu stake primero antes de configurar la indexación de manera efectiva, pero no podrás reclamar ninguna recompensa en L2 hasta que asignes a subgrafos en L2, los indexes y presentes POIs. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### ¿Pueden los Delegadores trasladar su delegación antes de que yo traslade mi stake de Indexador? 
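The FAQ text in the hunks above repeatedly recommends updating a dapp's query URL within 48 hours of a transfer. As a rough illustration only (the gateway hostnames, API key and Subgraph ID below are placeholders, not values taken from this patch), the consumer-side switch can look like the following TypeScript sketch, which re-points a query at the Arbitrum gateway and smoke-tests it with the `_meta` field that subgraph endpoints expose.

```typescript
// Sketch: re-pointing a dapp's query URL after a Subgraph moves to Arbitrum One.
// Both URLs are placeholders; substitute your own API key and Subgraph ID and
// confirm the current gateway hostnames in The Graph's documentation.
// Requires Node 18+ (global fetch) or a fetch polyfill.
const OLD_L1_QUERY_URL =
  "https://gateway.thegraph.com/api/<API_KEY>/subgraphs/id/<SUBGRAPH_ID>";
const NEW_L2_QUERY_URL =
  "https://gateway-arbitrum.network.thegraph.com/api/<API_KEY>/subgraphs/id/<SUBGRAPH_ID>";

// _meta is served by subgraph endpoints and is a convenient liveness check.
const query = `{ _meta { block { number } } }`;

async function smokeTest(url: string): Promise<void> {
  const res = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query }),
  });
  const json = await res.json();
  console.log(url, "->", JSON.stringify(json));
}

// Query the L2 endpoint; once it answers, retire the L1 URL.
smokeTest(NEW_L2_QUERY_URL).catch(console.error);
```

Keeping the old L1 URL only for the grace period and dropping it once the L2 endpoint responds matches the 48-hour recommendation in the text above.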
From fb4d2b95f15e0cd0ebc769a37f37c092b16c9fc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:20 -0500 Subject: [PATCH 0297/1789] New translations l2-transfer-tools-faq.mdx (Arabic) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/ar/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/ar/archived/arbitrum/l2-transfer-tools-faq.mdx index 9c949027b41f..965c96f7355a 100644 --- a/website/src/pages/ar/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/ar/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ The exception is with smart contract wallets like multisigs: these are smart con The L2 Transfer Tools use Arbitrum’s native mechanism to send messages from L1 to L2. This mechanism is called a “retryable ticket” and is used by all native token bridges, including the Arbitrum GRT bridge. You can read more about retryable tickets in the [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -When you transfer your assets (subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. 
+This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? @@ -36,43 +36,43 @@ If you have the L1 transaction hash (which you can find by looking at the recent ## نقل الـ Subgraph (الرسم البياني الفرعي) -### كيفكيف أقوم بتحويل الـ subgraph الخاص بي؟ +### How do I transfer my Subgraph? -لنقل الـ subgraph الخاص بك ، ستحتاج إلى إكمال الخطوات التالية: +To transfer your Subgraph, you will need to complete the following steps: 1. ابدأ التحويل على شبكة Ethereum mainnet 2. انتظر 20 دقيقة للتأكيد -3. قم بتأكيد نقل الـ subgraph على Arbitrum \ \* +3. Confirm Subgraph transfer on Arbitrum\* -4. قم بإنهاء نشر الـ subgraph على Arbitrum +4. Finish publishing Subgraph on Arbitrum 5. جدث عنوان URL للاستعلام (مستحسن) -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### من أين يجب أن أبدأ التحويل ؟ -يمكنك بدء عملية النقل من [Subgraph Studio] (https://thegraph.com/studio/) ، [Explorer ،] (https://thegraph.com/explorer) أو من أي صفحة تفاصيل subgraph. انقر فوق الزر "Transfer Subgraph" في صفحة تفاصيل الرسم الـ subgraph لبدء النقل. +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### كم من الوقت سأنتظر حتى يتم نقل الـ subgraph الخاص بي +### How long do I need to wait until my Subgraph is transferred يستغرق وقت النقل حوالي 20 دقيقة. يعمل جسر Arbitrum في الخلفية لإكمال نقل الجسر تلقائيًا. في بعض الحالات ، قد ترتفع تكاليف الغاز وستحتاج إلى تأكيد المعاملة مرة أخرى. -### هل سيظل الـ subgraph قابلاً للاكتشاف بعد أن أنقله إلى L2؟ +### Will my Subgraph still be discoverable after I transfer it to L2? -سيكون الـ subgraph الخاص بك قابلاً للاكتشاف على الشبكة التي تم نشرها عليها فقط. 
على سبيل المثال ، إذا كان الـ subgraph الخاص بك موجودًا على Arbitrum One ، فيمكنك العثور عليه فقط في Explorer على Arbitrum One ولن تتمكن من العثور عليه على Ethereum. يرجى التأكد من تحديد Arbitrum One في مبدل الشبكة في أعلى الصفحة للتأكد من أنك على الشبكة الصحيحة. بعد النقل ، سيظهر الـ L1 subgraph على أنه مهمل. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### هل يلزم نشر الـ subgraph الخاص بي لنقله؟ +### Does my Subgraph need to be published to transfer it? -للاستفادة من أداة نقل الـ subgraph ، يجب أن يكون الرسم البياني الفرعي الخاص بك قد تم نشره بالفعل على شبكة Ethereum الرئيسية ويجب أن يكون لديه إشارة تنسيق مملوكة للمحفظة التي تمتلك الرسم البياني الفرعي. إذا لم يتم نشر الرسم البياني الفرعي الخاص بك ، فمن المستحسن أن تقوم ببساطة بالنشر مباشرة على Arbitrum One - ستكون رسوم الغاز أقل بكثير. إذا كنت تريد نقل رسم بياني فرعي منشور ولكن حساب المالك لا يملك إشارة تنسيق عليه ، فيمكنك الإشارة بمبلغ صغير (على سبيل المثال 1 GRT) من ذلك الحساب ؛ تأكد من اختيار إشارة "auto-migrating". +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### ماذا يحدث لإصدار Ethereum mainnet للرسم البياني الفرعي الخاص بي بعد أن النقل إلى Arbitrum؟ +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -بعد نقل الرسم البياني الفرعي الخاص بك إلى Arbitrum ، سيتم إهمال إصدار Ethereum mainnet. نوصي بتحديث عنوان URL للاستعلام في غضون 48 ساعة. ومع ذلك ، هناك فترة سماح تحافظ على عمل عنوان URL للشبكة الرئيسية الخاصة بك بحيث يمكن تحديث أي دعم dapp لجهة خارجية. +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### بعد النقل ، هل أحتاج أيضًا إلى إعادة النشر على Arbitrum؟ @@ -80,21 +80,21 @@ If you have the L1 transaction hash (which you can find by looking at the recent ### Will my endpoint experience downtime while re-publishing? -It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### هل يتم نشر وتخطيط الإصدار بنفس الطريقة في الـ L2 كما هو الحال في شبكة Ethereum Ethereum mainnet؟ -Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. 
In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### هل سينتقل تنسيق الـ subgraph مع الـ subgraph ؟ +### Will my Subgraph's curation move with my Subgraph? -إذا اخترت إشارة الترحيل التلقائي auto-migrating ، فسيتم نقل 100٪ من التنسيق مع الرسم البياني الفرعي الخاص بك إلى Arbitrum One. سيتم تحويل كل إشارة التنسيق الخاصة بالرسم الفرعي إلى GRT في وقت النقل ، وسيتم استخدام GRT المقابل لإشارة التنسيق الخاصة بك لصك الإشارة على L2 subgraph. +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -يمكن للمنسقين الآخرين اختيار ما إذا كانوا سيسحبون أجزاء من GRT ، أو ينقلونه أيضًا إلى L2 لإنتاج إشارة على نفس الرسم البياني الفرعي. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### هل يمكنني إعادة الرسم البياني الفرعي الخاص بي إلى Ethereum mainnet بعد أن أقوم بالنقل؟ +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -بمجرد النقل ، سيتم إهمال إصدار شبكة Ethereum mainnet للرسم البياني الفرعي الخاص بك. إذا كنت ترغب في العودة إلى mainnet ، فستحتاج إلى إعادة النشر (redeploy) والنشر مرة أخرى على mainnet. ومع ذلك ، لا يُنصح بشدة بالتحويل مرة أخرى إلى شبكة Ethereum mainnet حيث سيتم في النهاية توزيع مكافآت الفهرسة بالكامل على Arbitrum One. +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### لماذا أحتاج إلى Bridged ETH لإكمال النقل؟ @@ -206,19 +206,19 @@ The tokens that are being undelegated are "locked" and therefore cannot be trans \ \* إذا لزم الأمر -أنت تستخدم عنوان عقد. -### كيف سأعرف ما إذا كان الرسم البياني الفرعي الذي قمت بعمل إشارة تنسيق عليه قد انتقل إلى L2؟ +### How will I know if the Subgraph I curated has moved to L2? -عند عرض صفحة تفاصيل الرسم البياني الفرعي ، ستعلمك لافتة بأنه تم نقل هذا الرسم البياني الفرعي. يمكنك اتباع التعليمات لنقل إشارة التنسيق الخاص بك. يمكنك أيضًا العثور على هذه المعلومات في صفحة تفاصيل الرسم البياني الفرعي لأي رسم بياني فرعي تم نقله. +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### ماذا لو كنت لا أرغب في نقل إشارة التنسيق الخاص بي إلى L2؟ -عندما يتم إهمال الرسم البياني الفرعي ، يكون لديك خيار سحب الإشارة. وبالمثل ، إذا انتقل الرسم البياني الفرعي إلى L2 ، فيمكنك اختيار سحب الإشارة في شبكة Ethereum الرئيسية أو إرسال الإشارة إلى L2. +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### كيف أعرف أنه تم نقل إشارة التنسيق بنجاح؟ يمكن الوصول إلى تفاصيل الإشارة عبر Explorer بعد حوالي 20 دقيقة من بدء أداة النقل للـ L2. 
-### هل يمكنني نقل إشاة التنسيق الخاص بي على أكثر من رسم بياني فرعي في وقت واحد؟ +### Can I transfer my curation on more than one Subgraph at a time? لا يوجد خيار كهذا حالياً. @@ -266,7 +266,7 @@ The tokens that are being undelegated are "locked" and therefore cannot be trans ### هل يجب أن أقوم بالفهرسة على Arbitrum قبل أن أنقل حصتي؟ -يمكنك تحويل حصتك بشكل فعال أولاً قبل إعداد الفهرسة ، ولكن لن تتمكن من المطالبة بأي مكافآت على L2 حتى تقوم بتخصيصها لـ subgraphs على L2 وفهرستها وعرض POIs. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### هل يستطيع المفوضون نقل تفويضهم قبل نقل indexing stake الخاص بي؟ From cb7c5bc29c71314fbe690f5a826a03951e679cb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:22 -0500 Subject: [PATCH 0298/1789] New translations l2-transfer-tools-faq.mdx (Czech) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/cs/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/cs/archived/arbitrum/l2-transfer-tools-faq.mdx index 88e1d9e632a2..439e83f3864b 100644 --- a/website/src/pages/cs/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/cs/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ Výjimkou jsou peněženky s chytrými smlouvami, jako je multisigs: jedná se o Nástroje pro přenos L2 používají k odesílání zpráv z L1 do L2 nativní mechanismus Arbitrum. Tento mechanismus se nazývá 'retryable ticket,' a všechny nativní tokenové můstky, včetně můstku Arbitrum GRT, ho používají. Další informace o opakovatelných ticketch naleznete v části [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -Při přenosu aktiv (podgraf, podíl, delegace nebo kurátorství) do L2 se odešle zpráva přes můstek Arbitrum GRT, která vytvoří opakovatelný tiket v L2. Nástroj pro převod zahrnuje v transakci určitou hodnotu ETH, která se použije na 1) zaplacení vytvoření tiketu a 2) zaplacení plynu pro provedení tiketu v L2. Se však ceny plynu mohou v době, než je ticket připraven k provedení v režimu L2, měnit. Je možné, že se tento pokus o automatické provedení nezdaří. Když se tak stane, most Arbitrum udrží opakovatelný tiket naživu až 7 dní a kdokoli se může pokusit o jeho "vykoupení" (což vyžaduje peněženku s určitým množstvím ETH propojenou s mostem Arbitrum). +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -Tomuto kroku říkáme 'Potvrzení' ve všech nástrojích pro přenos - ve většině případů se spustí automaticky, protože automatické provedení je většinou úspěšné, ale je důležité, abyste se ujistili, že proběhlo. 
Pokud se to nepodaří a během 7 dnů nedojde k žádnému úspěšnému opakování, můstek Arbitrum tiket zahodí a vaše aktiva (podgraf, podíl, delegace nebo kurátorství) budou ztracena a nebude možné je obnovit. Vývojáři The Graph jádra mají k dispozici monitorovací systém, který tyto situace odhaluje a snaží se lístky uplatnit dříve, než bude pozdě, ale v konečném důsledku je vaší odpovědností zajistit, aby byl váš přenos dokončen včas. Pokud máte potíže s potvrzením transakce, obraťte se na nás pomocí [tohoto formuláře](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) a hlavní vývojáři vám pomohou. +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### Zahájil jsem přenos delegace/podílů/kurátorství a nejsem si jistý, zda se to dostalo do L2. Jak mohu potvrdit, že to bylo přeneseno správně? @@ -36,43 +36,43 @@ Pokud máte k dispozici hash transakce L1 (který zjistíte, když se podíváte ## Podgraf přenos -### Jak mohu přenést svůj podgraf? +### How do I transfer my Subgraph? -Chcete-li přenést svůj podgraf, musíte provést následující kroky: +To transfer your Subgraph, you will need to complete the following steps: 1. Zahájení převodu v mainnet Ethereum 2. Počkejte 20 minut na potvrzení -3. Potvrzení přenosu podgrafů na Arbitrum\* +3. Confirm Subgraph transfer on Arbitrum\* -4. Úplné zveřejnění podgrafu na arbitrum +4. Finish publishing Subgraph on Arbitrum 5. Aktualizovat adresu URL dotazu (doporučeno) -\*Upozorňujeme, že převod musíte potvrdit do 7 dnů, jinak může dojít ke ztrátě vašeho podgrafu. Ve většině případů se tento krok provede automaticky, ale v případě prudkého nárůstu cen plynu na Arbitru může být nutné ruční potvrzení. Pokud se během tohoto procesu vyskytnou nějaké problémy, budou k dispozici zdroje, které vám pomohou: kontaktujte podporu na adrese support@thegraph.com nebo na [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### Odkud mám iniciovat převod? -Přenos můžete zahájit v [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) nebo na libovolné stránce s detaily subgrafu. "Kliknutím na tlačítko 'Transfer Subgraph' na stránce s podrobnostmi o podgrafu zahájíte přenos. 
+You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### Jak dlouho musím čekat, než bude můj podgraf přenesen +### How long do I need to wait until my Subgraph is transferred Přenos trvá přibližně 20 minut. Most Arbitrum pracuje na pozadí a automaticky dokončí přenos mostu. V některých případech může dojít ke zvýšení nákladů na plyn a transakci bude nutné potvrdit znovu. -### Bude můj podgraf zjistitelný i poté, co jej přenesu do L2? +### Will my Subgraph still be discoverable after I transfer it to L2? -Váš podgraf bude zjistitelný pouze v síti, ve které je publikován. Pokud se například váš subgraf nachází na Arbitrum One, pak jej najdete pouze v Průzkumníku na Arbitrum One a na Ethereum jej nenajdete. Ujistěte se, že máte v přepínači sítí v horní části stránky vybranou možnost Arbitrum One, abyste se ujistili, že jste ve správné síti. Po přenosu se podgraf L1 zobrazí jako zastaralý. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### Musí být můj podgraf zveřejněn, abych ho mohl přenést? +### Does my Subgraph need to be published to transfer it? -Abyste mohli využít nástroj pro přenos subgrafů, musí být váš subgraf již zveřejněn v mainnet Ethereum a musí mít nějaký kurátorský signál vlastněný peněženkou, která subgraf vlastní. Pokud váš subgraf není zveřejněn, doporučujeme vám jednoduše publikovat přímo na Arbitrum One - související poplatky za plyn budou podstatně nižší. Pokud chcete přenést publikovaný podgraf, ale účet vlastníka na něm nemá kurátorský signál, můžete z tohoto účtu signalizovat malou částku (např. 1 GRT); nezapomeňte zvolit "auto-migrating" signál. +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### Co se stane s verzí mého subgrafu na ethereum mainnet po převodu na Arbitrum? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -Po převedení vašeho subgrafu na Arbitrum bude verze mainnet Ethereum zastaralá. Doporučujeme vám aktualizovat adresu URL dotazu do 48 hodin. Je však zavedena ochranná lhůta, která udržuje adresu URL mainnet funkční, aby bylo možné aktualizovat podporu dapp třetích stran. +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### Musím po převodu také znovu publikovat na Arbitrum? 
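Several answers in this file describe the "Confirm" step as an Arbitrum retryable ticket that must be redeemed within 7 days. The sketch below is an assumption-laden illustration, not part of this patch: it assumes `@arbitrum/sdk` v3 (`L1TransactionReceipt`, `getL1ToL2Messages`, the `L1ToL2MessageStatus` enum) together with ethers v5, so verify the names against the current SDK documentation. Given the L1 transaction hash, it checks whether the ticket was auto-executed on Arbitrum One.

```typescript
import { providers } from "ethers"; // ethers v5, as assumed above
import { L1TransactionReceipt, L1ToL2MessageStatus } from "@arbitrum/sdk";

// RPC endpoints are placeholders; use your own providers.
const l1Provider = new providers.JsonRpcProvider("https://ethereum-rpc.example");
const l2Provider = new providers.JsonRpcProvider("https://arbitrum-one-rpc.example");

// Inspect the retryable ticket created by an L1 transfer transaction
// (the "Confirm" step the FAQ describes).
async function checkTransfer(l1TxHash: string): Promise<void> {
  const receipt = await l1Provider.getTransactionReceipt(l1TxHash);
  const l1Receipt = new L1TransactionReceipt(receipt);
  const [message] = await l1Receipt.getL1ToL2Messages(l2Provider);
  if (!message) throw new Error("No L1->L2 message found in this transaction");

  const { status } = await message.waitForStatus();
  if (status === L1ToL2MessageStatus.REDEEMED) {
    console.log("Ticket executed on L2; the transfer went through.");
  } else {
    console.log(`Ticket not redeemed yet (status ${status}); manual confirmation may be needed.`);
  }
}

// Placeholder hash; pass the hash of your own L1 transfer transaction.
checkTransfer("0x<l1-transaction-hash>").catch(console.error);
```

If the status is anything other than redeemed after the roughly 20-minute window mentioned above, that is the case where the manual confirmation (or the support form referenced in this file) comes into play.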
@@ -80,21 +80,21 @@ Po uplynutí 20minutového okna pro převod budete muset převod potvrdit transa ### Dojde při opětovném publikování k výpadku mého koncového bodu? -Je nepravděpodobné, ale je možné, že dojde ke krátkému výpadku v závislosti na tom, které indexátory podporují podgraf na L1 a zda jej indexují, dokud není podgraf plně podporován na L2. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### Je publikování a verzování na L2 stejné jako na mainnet Ethereum Ethereum? -Ano. Při publikování v aplikaci Subgraph Studio vyberte jako publikovanou síť Arbitrum One. Ve Studiu bude k dispozici nejnovější koncový bod, který odkazuje na nejnovější aktualizovanou verzi podgrafu. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### Bude se kurátorství mého podgrafu pohybovat spolu s mým podgrafem? +### Will my Subgraph's curation move with my Subgraph? -Pokud jste zvolili automatickou migraci signálu, 100 % vaší vlastní kurátorství se přesune spolu s vaším subgrafem do Arbitrum One. Veškerý signál kurátorství podgrafu bude v okamžiku převodu převeden na GRT a GRT odpovídající vašemu signálu kurátorství bude použit k ražbě signálu na podgrafu L2. +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -Ostatní kurátoři se mohou rozhodnout, zda stáhnou svou část GRT, nebo ji také převedou na L2, aby vyrazili signál na stejném podgraf. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### Mohu svůj subgraf po převodu přesunout zpět do mainnet Ethereum? +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -Po přenosu bude vaše verze tohoto podgrafu v síti Ethereum mainnet zneplatněna. Pokud se chcete přesunout zpět do mainnetu, musíte provést nové nasazení a publikovat zpět do mainnet. Převod zpět do mainnetu Etherea se však důrazně nedoporučuje, protože odměny za indexování budou nakonec distribuovány výhradně na Arbitrum One. +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### Proč potřebuji k dokončení převodu překlenovací ETH? @@ -206,19 +206,19 @@ Chcete-li přenést své kurátorství, musíte provést následující kroky: \*Pokud je to nutné - tj. používáte smluvní adresu. -### Jak se dozvím, že se mnou kurátorovaný podgraf přesunul do L2? +### How will I know if the Subgraph I curated has moved to L2? -Při zobrazení stránky s podrobnostmi podgrafu se zobrazí banner s upozorněním, že tento podgraf byl přenesen. Můžete následovat výzvu k přenosu kurátorství. Tyto informace najdete také na stránce s podrobnostmi o podgrafu, který se přesunul. +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. 
You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### Co když si nepřeji přesunout své kurátorství do L2? -Pokud je podgraf vyřazen, máte možnost stáhnout svůj signál. Stejně tak pokud se podgraf přesunul do L2, můžete si vybrat, zda chcete stáhnout svůj signál v mainnet Ethereum, nebo signál poslat do L2. +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### Jak poznám, že se moje kurátorství úspěšně přeneslo? Podrobnosti o signálu budou k dispozici prostřednictvím Průzkumníka přibližně 20 minut po spuštění nástroje pro přenos L2. -### Mohu přenést své kurátorství na více než jeden podgraf najednou? +### Can I transfer my curation on more than one Subgraph at a time? V současné době není k dispozici možnost hromadného přenosu. @@ -266,7 +266,7 @@ Nástroj pro převod L2 dokončí převod vašeho podílu přibližně za 20 min ### Musím před převodem svého podílu indexovat na Arbitrum? -Před nastavením indexování můžete nejprve efektivně převést svůj podíl, ale nebudete si moci nárokovat žádné odměny na L2, dokud nepřidělíte podgrafy na L2, neindexujete je a nepředložíte POIs. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### Mohou delegáti přesunout svou delegaci dříve, než přesunu svůj indexovací podíl? From 9a4b33ac27328b403de29bb307183eb074d7913c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:23 -0500 Subject: [PATCH 0299/1789] New translations l2-transfer-tools-faq.mdx (German) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/de/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/de/archived/arbitrum/l2-transfer-tools-faq.mdx index 8abcda305f8a..6da46f82c1fc 100644 --- a/website/src/pages/de/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/de/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ Die Ausnahme sind Smart-Contract-Wallets wie Multisigs: Das sind Smart Contracts Die L2-Transfer-Tools verwenden den nativen Mechanismus von Arbitrum, um Nachrichten von L1 nach L2 zu senden. Dieser Mechanismus wird "retryable ticket" genannt und wird von allen nativen Token-Bridges verwendet, einschließlich der Arbitrum GRT-Bridge. Sie können mehr über wiederholbare Tickets in den [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging) lesen. -Wenn Sie Ihre Vermögenswerte (Subgraph, Anteil, Delegation oder Kuration) an L2 übertragen, wird eine Nachricht über die Arbitrum GRT-Brücke gesendet, die ein wiederholbares Ticket in L2 erstellt. Das Transfer-Tool beinhaltet einen gewissen ETH-Wert in der Transaktion, der verwendet wird, um 1) die Erstellung des Tickets und 2) das Gas für die Ausführung des Tickets in L2 zu bezahlen. Da jedoch die Gaspreise in der Zeit, bis das Zertifikat zur Ausführung in L2 bereit ist, schwanken können, ist es möglich, dass dieser automatische Ausführungsversuch fehlschlägt. 
Wenn das passiert, hält die Arbitrum-Brücke das wiederholbare Zertifikat für bis zu 7 Tage am Leben, und jeder kann versuchen, das Ticket erneut "einzulösen" (was eine Geldbörse mit etwas ETH erfordert, die mit Arbitrum verbunden ist). +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -Dies ist der so genannte "Bestätigungsschritt" in allen Übertragungswerkzeugen - er wird in den meisten Fällen automatisch ausgeführt, da die automatische Ausführung meist erfolgreich ist, aber es ist wichtig, dass Sie sich vergewissern, dass die Übertragung erfolgreich war. Wenn dies nicht gelingt und es innerhalb von 7 Tagen keine erfolgreichen Wiederholungsversuche gibt, verwirft die Arbitrum-Brücke das Ticket, und Ihre Assets (Subgraph, Pfahl, Delegation oder Kuration) gehen verloren und können nicht wiederhergestellt werden. Die Entwickler des Graph-Kerns haben ein Überwachungssystem eingerichtet, um diese Situationen zu erkennen und zu versuchen, die Tickets einzulösen, bevor es zu spät ist, aber es liegt letztendlich in Ihrer Verantwortung, sicherzustellen, dass Ihr Transfer rechtzeitig abgeschlossen wird. Wenn Sie Probleme mit der Bestätigung Ihrer Transaktion haben, wenden Sie sich bitte an [dieses Formular] (https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) und die Entwickler des Kerns werden Ihnen helfen. +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### Ich habe mit der Übertragung meiner Delegation/des Einsatzes/der Kuration begonnen und bin mir nicht sicher, ob sie an L2 weitergeleitet wurde. Wie kann ich bestätigen, dass sie korrekt übertragen wurde? @@ -36,43 +36,43 @@ If you have the L1 transaction hash (which you can find by looking at the recent ## Subgraph-Transfer -### Wie übertrage ich meinen Subgraphen +### How do I transfer my Subgraph? -Um Ihren Subgraphen zu übertragen, müssen Sie die folgenden Schritte ausführen: +To transfer your Subgraph, you will need to complete the following steps: 1. Starten Sie den Transfer im Ethereum-Mainnet 2. 20 Minuten auf Bestätigung warten -3. 
Bestätigung der Übertragung von Subgraphen auf Arbitrum\* +3. Confirm Subgraph transfer on Arbitrum\* -4. Veröffentlichung des Subgraphen auf Arbitrum beenden +4. Finish publishing Subgraph on Arbitrum 5. Abfrage-URL aktualisieren (empfohlen) -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### Von wo aus soll ich meine Übertragung veranlassen? -Sie können die Übertragung vom [Subgraph Studio] (https://thegraph.com/studio/), vom [Explorer] (https://thegraph.com/explorer) oder von einer beliebigen Subgraph-Detailseite aus starten. Klicken Sie auf die Schaltfläche "Subgraph übertragen" auf der Detailseite des Subgraphen, um die Übertragung zu starten. +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### Wie lange muss ich warten, bis mein Subgraph übertragen wird? +### How long do I need to wait until my Subgraph is transferred Die Übertragungszeit beträgt etwa 20 Minuten. Die Arbitrum-Brücke arbeitet im Hintergrund, um den Übergang automatisch abzuschließen. In einigen Fällen können die Gaskosten in die Höhe schnellen und Sie müssen die Transaktion erneut bestätigen. -### Wird mein Subgraph noch auffindbar sein, nachdem ich ihn auf L2 übertragen habe? +### Will my Subgraph still be discoverable after I transfer it to L2? -Ihr Subgraph ist nur in dem Netzwerk auffindbar, in dem er veröffentlicht ist. Wenn Ihr Subgraph zum Beispiel auf Arbitrum One ist, können Sie ihn nur im Explorer auf Arbitrum One finden und nicht auf Ethereum. Bitte vergewissern Sie sich, dass Sie Arbitrum One in der Netzwerkumschaltung oben auf der Seite ausgewählt haben, um sicherzustellen, dass Sie sich im richtigen Netzwerk befinden. Nach der Übertragung wird der L1-Subgraph als veraltet angezeigt. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### Muss mein Subgraph ( Teilgraph ) veröffentlicht werden, um ihn zu übertragen? +### Does my Subgraph need to be published to transfer it? -Um das Subgraph-Transfer-Tool nutzen zu können, muss Ihr Subgraph bereits im Ethereum-Mainnet veröffentlicht sein und über ein Kurationssignal verfügen, das der Wallet gehört, die den Subgraph besitzt. 
Wenn Ihr Subgraph nicht veröffentlicht ist, empfehlen wir Ihnen, ihn einfach direkt auf Arbitrum One zu veröffentlichen - die damit verbundenen Gasgebühren sind erheblich niedriger. Wenn Sie einen veröffentlichten Subgraphen übertragen wollen, aber das Konto des Eigentümers kein Signal darauf kuratiert hat, können Sie einen kleinen Betrag (z.B. 1 GRT) von diesem Konto signalisieren; stellen Sie sicher, dass Sie ein "auto-migrating" Signal wählen. +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### Was passiert mit der Ethereum-Mainnet-Version meines Subgraphen, nachdem ich zu Arbitrum übergehe? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -Nach der Übertragung Ihres Subgraphen auf Arbitrum wird die Ethereum-Hauptnetzversion veraltet sein. Wir empfehlen Ihnen, Ihre Abfrage-URL innerhalb von 48 Stunden zu aktualisieren. Es gibt jedoch eine Schonfrist, die Ihre Mainnet-URL funktionsfähig hält, so dass jede Drittanbieter-Dapp-Unterstützung aktualisiert werden kann. +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### Muss ich nach der Übertragung auch auf Arbitrum neu veröffentlichen? @@ -80,21 +80,21 @@ Nach Ablauf des 20-minütigen Übertragungsfensters müssen Sie die Übertragung ### Will my endpoint experience downtime while re-publishing? -It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### Ist die Veröffentlichung und Versionierung auf L2 die gleiche wie im Ethereum-Mainnet? -Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### Bewegt sich die Kuration meines Untergraphen ( Subgraphen ) mit meinem Untergraphen? +### Will my Subgraph's curation move with my Subgraph? -Wenn Sie die automatische Signalmigration gewählt haben, werden 100 % Ihrer eigenen Kuration mit Ihrem Subgraphen zu Arbitrum One übertragen. Alle Kurationssignale des Subgraphen werden zum Zeitpunkt des Transfers in GRT umgewandelt, und die GRT, die Ihrem Kurationssignal entsprechen, werden zum Prägen von Signalen auf dem L2-Subgraphen verwendet. +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. 
All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -Andere Kuratoren können wählen, ob sie ihren Anteil an GRT zurückziehen oder ihn ebenfalls auf L2 übertragen, um das Signal auf demselben Untergraphen zu prägen. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### Kann ich meinen Subgraph nach dem Transfer zurück ins Ethereum Mainnet verschieben? +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -Nach der Übertragung wird Ihre Ethereum-Mainnet-Version dieses Untergraphen veraltet sein. Wenn Sie zum Mainnet zurückkehren möchten, müssen Sie Ihre Version neu bereitstellen und zurück zum Mainnet veröffentlichen. Es wird jedoch dringend davon abgeraten, zurück ins Ethereum Mainnet zu wechseln, da die Indexierungsbelohnungen schließlich vollständig auf Arbitrum One verteilt werden. +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### Warum brauche ich überbrückte ETH, um meine Überweisung abzuschließen? @@ -206,19 +206,19 @@ Um Ihre Kuration zu übertragen, müssen Sie die folgenden Schritte ausführen: \* Falls erforderlich - d.h. wenn Sie eine Vertragsadresse verwenden. -### Wie erfahre ich, ob der von mir kuratierte Subgraph nach L2 umgezogen ist? +### How will I know if the Subgraph I curated has moved to L2? -Auf der Seite mit den Details der Subgraphen werden Sie durch ein Banner darauf hingewiesen, dass dieser Subgraph übertragen wurde. Sie können der Aufforderung folgen, um Ihre Kuration zu übertragen. Diese Information finden Sie auch auf der Seite mit den Details zu jedem verschobenen Subgraphen. +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### Was ist, wenn ich meine Kuration nicht auf L2 verschieben möchte? -Wenn ein Subgraph veraltet ist, haben Sie die Möglichkeit, Ihr Signal zurückzuziehen. Wenn ein Subgraph nach L2 verschoben wurde, können Sie wählen, ob Sie Ihr Signal im Ethereum-Mainnet zurückziehen oder das Signal an L2 senden. +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### Woran erkenne ich, dass meine Kuration erfolgreich übertragen wurde? Die Signaldetails werden etwa 20 Minuten nach dem Start des L2-Transfertools über den Explorer zugänglich sein. -### Kann ich meine Kuration auf mehr als einen Subgraphen zur gleichen Zeit übertragen? +### Can I transfer my curation on more than one Subgraph at a time? Zurzeit gibt es keine Option für Massenübertragungen. @@ -266,7 +266,7 @@ Es dauert etwa 20 Minuten, bis das L2-Transfertool die Übertragung Ihres Einsat ### Muss ich auf Arbitrum indexieren, bevor ich meinen Einsatz übertrage? 
-Sie können Ihren Einsatz zuerst überweisen, bevor Sie die Indizierung einrichten, aber Sie können keine Belohnungen auf L2 beanspruchen, bevor Sie Subgraphen auf L2 zuweisen, sie indizieren und POIs präsentieren. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### Können Delegatoren ihre Delegation verschieben, bevor ich meine Indizierungsbeteiligung verschiebe? From cdb8386e6dcc2b3d8553f0816607c4ad61a65654 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:24 -0500 Subject: [PATCH 0300/1789] New translations l2-transfer-tools-faq.mdx (Italian) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/it/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/it/archived/arbitrum/l2-transfer-tools-faq.mdx index bc5a9ac711c5..0dd870395760 100644 --- a/website/src/pages/it/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/it/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ The exception is with smart contract wallets like multisigs: these are smart con Gli Strumenti di Trasferimento L2 utilizzano il meccanismo nativo di Arbitrum per inviare messaggi da L1 a L2. Questo meccanismo è chiamato "retryable ticket" e viene utilizzato da tutti i bridge di token nativi, incluso il bridge GRT di Arbitrum. Puoi leggere ulteriori dettagli sui retryable tickets nella [documentazione di Arbitrum](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -Quando trasferisci i tuoi asset (subgraph, stake, delegation o curation) su L2, un messaggio viene inviato tramite il bridge GRT di Arbitrum, che crea un "retryable ticket" su L2. Lo strumento di trasferimento include un valore in ETH nella transazione, che viene utilizzato per 1) pagare la creazione del ticket e 2) coprire il costo del gas per eseguire il ticket su L2. Tuttavia, poiché i prezzi del gas potrebbero variare nel tempo fino a quando il ticket non è pronto per l'esecuzione su L2, è possibile che questo tentativo di auto-esecuzione fallisca. Quando ciò accade, il bridge Arbitrum manterrà il "retryable ticket" attivo per un massimo di 7 giorni, e chiunque può riprovare a "riscattare" il ticket (il che richiede un wallet con un po' di ETH trasferiti su Arbitrum). +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -Questo è ciò che chiamiamo il passaggio "Conferma" in tutti gli strumenti di trasferimento: in molti casi verrà eseguito automaticamente, poiché l'auto-esecuzione ha spesso successo, ma è importante che tu verifichi che sia andato a buon fine. 
Se non è andato a buon fine e nessuna riprova ha successo entro 7 giorni, il bridge Arbitrum scarterà il "retryable ticket" e i tuoi asset (subgraph, stake, delegation o curation) andranno persi e non potranno essere recuperati. I core devs di The Graph hanno un sistema di monitoraggio per rilevare queste situazioni e cercare di riscattare i ticket prima che sia troppo tardi, ma alla fine è tua responsabilità assicurarti che il trasferimento venga completato in tempo. Se hai difficoltà a confermare la tua transazione, ti preghiamo di contattarci utilizzando [questo modulo](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) e i core devs saranno pronti ad aiutarti. +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? @@ -36,43 +36,43 @@ If you have the L1 transaction hash (which you can find by looking at the recent ## Traserimento del Subgraph -### Come faccio a trasferire un mio subgraph? +### How do I transfer my Subgraph? -Per fare un trasferimento del tuo subgraph, dovrai completare i seguenti passaggi: +To transfer your Subgraph, you will need to complete the following steps: 1. Inizializza il trasferimento su Ethereum mainnet 2. Aspetta 20 minuti per la conferma -3. Conferma il trasferimento del subgraph su Arbitrum\* +3. Confirm Subgraph transfer on Arbitrum\* -4. Termina la pubblicazione del subgraph su Arbitrum +4. Finish publishing Subgraph on Arbitrum 5. Aggiorna l'URL della Query (raccomandato) -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### Da dove devo inizializzare il mio trasferimento? -Puoi inizializzare il tuo trasferimento da [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) o dalla pagina di dettaglio di qualsiasi subgraph. 
Clicca sul bottone "Trasferisci Subgraph" sulla pagina di dettaglio del subgraph e inizia il trasferimento. +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### Quanto devo aspettare per il completamento del trasferimento del mio subgraph +### How long do I need to wait until my Subgraph is transferred Il tempo di trasferimento richiede circa 20 minuti. Il bridge Arbitrum sta lavorando in background per completare automaticamente il trasferimento. In alcuni casi, i costi del gas potrebbero aumentare e dovrai confermare nuovamente la transazione. -### I miei subgraph saranno ancora rintracciabili dopo averli trasferiti su L2? +### Will my Subgraph still be discoverable after I transfer it to L2? -Il tuo subgraph sarà rintracciabile solo sulla rete su cui è stata pubblicata. Ad esempio, se il tuo subgraph è su Arbitrum One, potrai trovarlo solo su Explorer su Arbitrum One e non sarai in grado di trovarlo su Ethereum. Assicurati di avere selezionato Arbitrum One nel tasto in alto nella pagina per essere sicuro di essere sulla rete corretta. Dopo il transfer, il subgraph su L1 apparirà come deprecato. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### Il mio subgraph deve essere pubblicato per poterlo trasferire? +### Does my Subgraph need to be published to transfer it? -Per usufruire dello strumento di trasferimento del subgraph, il tuo subgraph deve già essere pubblicato sulla mainnet di Ethereum e deve possedere alcuni segnali di curation di proprietà del wallet che possiede il subgraph. Se il tuo subgraph non è stato pubblicato, è consigliabile pubblicarlo direttamente su Arbitrum One: le commissioni di gas associate saranno considerevolmente più basse. Se desideri trasferire un subgraph pubblicato ma l'account proprietario non inserito nessun segnale di curation su di esso, puoi segnalare una piccola quantità (ad esempio 1 GRT) da quell'account; assicurati di selezionare il segnale "auto-migrante". +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### Cosa succede alla versione del mio subgraph sulla mainnet di Ethereum dopo il trasferimento su Arbitrum? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -Dopo aver trasferito il tuo subgraph su Arbitrum, la versione sulla mainnet di Ethereum sarà deprecata. Ti consigliamo di aggiornare l'URL della query entro 48 ore. 
Tuttavia, è previsto un periodo di tolleranza che mantiene funzionante l'URL sulla mainnet in modo che il supporto per eventuali dApp di terze parti possa essere aggiornato. +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### Dopo il trasferimento, devo anche pubblicare di nuovo su Arbitrum? @@ -80,21 +80,21 @@ Dopo la finestra di trasferimento di 20 minuti, dovrai confermare il trasferimen ### Will my endpoint experience downtime while re-publishing? -It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? -Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### Will my subgraph's curation move with my subgraph? +### Will my Subgraph's curation move with my Subgraph? -If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### Can I move my subgraph back to Ethereum mainnet after I transfer? +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### Why do I need bridged ETH to complete my transfer? @@ -206,19 +206,19 @@ To transfer your curation, you will need to complete the following steps: \*If necessary - i.e. 
you are using a contract address. -### How will I know if the subgraph I curated has moved to L2? +### How will I know if the Subgraph I curated has moved to L2? -When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### What if I do not wish to move my curation to L2? -When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### How do I know my curation successfully transferred? Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. -### Can I transfer my curation on more than one subgraph at a time? +### Can I transfer my curation on more than one Subgraph at a time? There is no bulk transfer option at this time. @@ -266,7 +266,7 @@ It will take approximately 20 minutes for the L2 transfer tool to complete trans ### Do I have to index on Arbitrum before I transfer my stake? -You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### Can Delegators move their delegation before I move my indexing stake? 
From e0d49c9fe16880449808cd3e4d965a22c2c69087 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:25 -0500 Subject: [PATCH 0301/1789] New translations l2-transfer-tools-faq.mdx (Japanese) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/ja/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/ja/archived/arbitrum/l2-transfer-tools-faq.mdx index 70999970ca9a..32be44b363b9 100644 --- a/website/src/pages/ja/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/ja/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ EthereumやArbitrumのようなEVMブロックチェーン上のウォレット L2転送ツールは、アービトラムのネイティブメカニズムを使用してL1からL2にメッセージを送信します。このメカニズムは「再試行可能チケット」と呼ばれ、Arbitrum GRTブリッジを含むすべてのネイティブトークンブリッジで使用されます。再試行可能なチケットの詳細については、[アービトラムドキュメント](https://docs.arbitrum.io/arbos/l1 からl2へのメッセージング)を参照してください。 -資産(サブグラフ、ステーク、委任、またはキュレーション)をL2に転送する際、Arbitrum GRTブリッジを介してメッセージが送信され、L2でretryable ticketが作成されます。転送ツールにはトランザクションに一部のETHが含まれており、これは1)チケットの作成に支払われ、2)L2でのチケットの実行に必要なガスに使用されます。ただし、チケットがL2で実行可能になるまでの時間でガス料金が変動する可能性があるため、この自動実行試行が失敗することがあります。その場合、Arbitrumブリッジはretryable ticketを最大7日間保持し、誰でもそのチケットを「償還」しようと再試行できます(これにはArbitrumにブリッジされた一部のETHを持つウォレットが必要です)。 +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -これは、すべての転送ツールで「確認」ステップと呼んでいるものです。ほとんどの場合、自動実行は成功するため、自動的に実行されますが、確認が完了したことを確認するために戻ってチェックすることが重要です。成功せず、7日間で成功した再試行がない場合、Arbitrumブリッジはそのチケットを破棄し、あなたの資産(サブグラフ、ステーク、委任、またはキュレーション)は失われ、回復できません。The Graphのコア開発者は、これらの状況を検出し、遅すぎる前にチケットを償還しようとする監視システムを設置していますが、最終的には転送が時間内に完了することを確認する責任があなたにあります。トランザクションの確認に問題がある場合は、[this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) を使用して連絡し、コア開発者が助けてくれるでしょう。 +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### 委任/ステーク/キュレーション転送を開始しましたが、L2 まで転送されたかどうかわかりません。正しく転送されたことを確認するにはどうすればよいですか? @@ -36,43 +36,43 @@ L1トランザクションのハッシュを持っている場合(これはウ ## 部分グラフの転送 -### サブグラフを転送するにはどうすればよいですか? +### How do I transfer my Subgraph? 
-サブグラフを転送するには、次の手順を完了する必要があります。 +To transfer your Subgraph, you will need to complete the following steps: 1. イーサリアムメインネットで転送を開始する 2. 確認を待つために20分お待ちください。 -3. Arbitrum でサブグラフ転送を確認します\* +3. Confirm Subgraph transfer on Arbitrum\* -4. Arbitrum でサブグラフの公開を完了する +4. Finish publishing Subgraph on Arbitrum 5. クエリ URL を更新 (推奨) -\*注意:7日以内に転送を確認する必要があります。それ以外の場合、サブグラフが失われる可能性があります。ほとんどの場合、このステップは自動的に実行されますが、Arbitrumでガス価格が急上昇した場合には手動で確認する必要があるかもしれません。このプロセス中に問題が発生した場合、サポートを受けるためのリソースが用意されています:support@thegraph.com に連絡するか、[Discord](https://discord.gg/graphprotocol)でお問い合わせください\。 +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### どこから転送を開始すればよいですか? -トランスファーを開始するには、[Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer)またはサブグラフの詳細ページからトランスファーを開始できます。サブグラフの詳細ページで「サブグラフを転送」ボタンをクリックしてトランスファーを開始してください。 +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### サブグラフが転送されるまでどれくらい待つ必要がありますか +### How long do I need to wait until my Subgraph is transferred トランスファーには約20分かかります。Arbitrumブリッジはバックグラウンドでブリッジトランスファーを自動的に完了します。一部の場合、ガス料金が急上昇する可能性があり、トランザクションを再度確認する必要があるかもしれません。 -### 私のサブグラフは L2 に転送した後も検出可能ですか? +### Will my Subgraph still be discoverable after I transfer it to L2? -あなたのサブグラフは、それが公開されたネットワーク上でのみ発見できます。たとえば、あなたのサブグラフがArbitrum Oneにある場合、それはArbitrum OneのExplorerでのみ見つけることができ、Ethereum上では見つけることはできません。正しいネットワークにいることを確認するために、ページの上部にあるネットワーク切り替えツールでArbitrum Oneを選択していることを確認してください。トランスファー後、L1サブグラフは非推奨として表示されます。 +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### 私のサブグラフを転送するには公開する必要がありますか? +### Does my Subgraph need to be published to transfer it? -サブグラフ転送ツールを活用するには、サブグラフがすでにEthereumメインネットに公開され、そのサブグラフを所有するウォレットが所有するキュレーション信号を持っている必要があります。サブグラフが公開されていない場合、Arbitrum Oneに直接公開することをお勧めします。関連するガス料金はかなり低くなります。公開されたサブグラフを転送したいが、所有者のアカウントがそれに対してキュレーション信号を出していない場合、そのアカウントから少額(たとえば1 GRT)の信号を送ることができます。必ず「auto-migrating(自動移行)」信号を選択してください。 +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### Arbitrumへの転送後、Ethereumメインネットバージョンの私のサブグラフはどうなりますか? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? 
-サブグラフをArbitrumに転送した後、Ethereumメインネットワークのバージョンは非推奨とされます。おすすめでは、48時間以内にクエリURLを更新することをお勧めしています。ただし、サードパーティのDAppサポートが更新されるために、メインネットワークのURLが機能し続ける猶予期間も設けられています。 +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### 転送後、Arbitrum上で再公開する必要がありますか? @@ -80,21 +80,21 @@ L1トランザクションのハッシュを持っている場合(これはウ ### 再公開中にエンドポイントでダウンタイムが発生しますか? -短期間のダウンタイムを経験する可能性は低いですが、L1でサブグラフをサポートしているインデクサーと、サブグラフが完全にL2でサポートされるまでインデクシングを続けるかどうかに依存することがあります。 +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### L2上での公開とバージョニングは、Ethereumメインネットと同じですか? -はい、Subgraph Studioで公開する際には、公開ネットワークとしてArbitrum Oneを選択してください。Studioでは、最新のエンドポイントが利用可能で、最新の更新されたサブグラフバージョンを指します。 +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### 私のサブグラフのキュレーションは、サブグラフと一緒に移動しますか? +### Will my Subgraph's curation move with my Subgraph? -自動移行信号を選択した場合、あなたのキュレーションの100%はサブグラフと一緒にArbitrum Oneに移行します。サブグラフのすべてのキュレーション信号は、転送時にGRTに変換され、あなたのキュレーション信号に対応するGRTがL2サブグラフ上で信号を発行するために使用されます。 +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -他のキュレーターは、自分の一部のGRTを引き出すか、それをL2に転送して同じサブグラフで信号を発行するかを選択できます。 +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### 転送後にサブグラフをEthereumメインネットに戻すことはできますか? +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -一度転送されると、Ethereumメインネットワークのサブグラフバージョンは非推奨とされます。メインネットワークに戻りたい場合、再デプロイしてメインネットワークに再度公開する必要があります。ただし、Ethereumメインネットワークに戻すことは強く勧められていません。なぜなら、将来的にはインデクシングリワードが完全にArbitrum Oneで分配されるためです。 +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### なぜ転送を完了するためにブリッジされたETHが必要なのですか? @@ -206,19 +206,19 @@ Indexerに連絡できる場合、彼らにL2トランスファーツールを \*必要な場合 - つまり、契約アドレスを使用している場合。 -### 私がキュレーションしたサブグラフが L2 に移動したかどうかはどうすればわかりますか? +### How will I know if the Subgraph I curated has moved to L2? -サブグラフの詳細ページを表示すると、このサブグラフが転送されたことを通知するバナーが表示されます。バナーに従ってキュレーションを転送できます。また、移動したサブグラフの詳細ページでもこの情報を見つけることができます。 +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### 自分のキュレーションを L2 に移動したくない場合はどうすればよいですか? -サブグラフが非推奨になった場合、信号を引き出すオプションがあります。同様に、サブグラフがL2に移動した場合、Ethereumメインネットワークで信号を引き出すか、L2に送信することを選択できます。 +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. 
### 私のキュレーションが正常に転送されたことを確認するにはどうすればよいですか? L2トランスファーツールを開始してから約20分後、Explorerを介して信号の詳細にアクセスできるようになります。 -### 一度に複数のサブグラフへキュレーションを転送することはできますか? +### Can I transfer my curation on more than one Subgraph at a time? 現時点では一括転送オプションは提供されていません。 @@ -266,7 +266,7 @@ L2トランスファーツールがステークの転送を完了するのに約 ### 株式を譲渡する前に、Arbitrum でインデックスを作成する必要がありますか? -インデクシングのセットアップよりも先にステークを効果的に転送できますが、L2でのサブグラフへの割り当て、それらのサブグラフのインデクシング、およびPOIの提出を行うまで、L2での報酬を請求することはできません。 +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### 委任者は、インデックス作成の賭け金を移動する前に委任を移動できますか? From 4e33e8b5a9e041a385bb4fb2aeb5bd70aafd7af7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:26 -0500 Subject: [PATCH 0302/1789] New translations l2-transfer-tools-faq.mdx (Korean) --- .../arbitrum/l2-transfer-tools-faq.mdx | 57 ++++++++++--------- 1 file changed, 30 insertions(+), 27 deletions(-) diff --git a/website/src/pages/ko/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/ko/archived/arbitrum/l2-transfer-tools-faq.mdx index 904587bfc535..be8af8c171b5 100644 --- a/website/src/pages/ko/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/ko/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -22,11 +22,12 @@ The exception is with smart contract wallets like multisigs: these are smart con ### 만약 7일 안에 이체를 완료하지 못하면 어떻게 되나요? -L2 전송 도구는 Arbitrum의 기본 메커니즘을 사용하여 L1에서 L2로 메시지를 보냅니다. 이 메커니즘은 "재시도 가능한 티켓"이라고 하며 Arbitrum GRT 브리지를 포함한 모든 네이티브 토큰 브리지를 사용하여 사용됩니다. 재시도 가능한 티켓에 대해 자세히 읽을 수 있습니다 [Arbitrum 문서] (https://docs.arbitrum.io/arbos/l1-to-l2-messaging). +L2 전송 도구는 Arbitrum의 기본 메커니즘을 사용하여 L1에서 L2로 메시지를 보냅니다. 이 메커니즘은 "재시도 가능한 티켓"이라고 하며 Arbitrum GRT 브리지를 포함한 모든 네이티브 토큰 브리지를 사용하여 사용됩니다. 재시도 가능한 티켓에 대해 자세히 읽을 수 있습니다 [Arbitrum 문서] +(https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -자산(하위 그래프, 스테이크, 위임 또는 큐레이션) 을 L2로 이전하면 L2에서 재시도 가능한 티켓을 생성하는 Arbitrum GRT 브리지를 통해 메시지가 전송됩니다. 전송 도구에는 거래에 일부 ETH 값이 포함되어 있으며, 이는 1) 티켓 생성 비용을 지불하고 2) L2에서 티켓을 실행하기 위해 가스 비용을 지불하는 데 사용됩니다. 그러나 티켓이 L2에서 실행될 준비가 될 때까지 가스 가격이 시간에 따라 달라질 수 있으므로 이 자동 실행 시도가 실패할 수 있습니다. 그런 일이 발생하면 Arbitrum 브릿지는 재시도 가능한 티켓을 최대 7일 동안 유지하며 누구나 티켓 "사용"을 재시도할 수 있습니다(Arbitrum에 브릿지된 일부 ETH가 있는 지갑이 필요함). +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -이것이 모든 전송 도구에서 '확인' 단계라고 부르는 것입니다. 자동 실행이 성공하는 경우가 가장 많기 때문에 대부분의 경우 자동으로 실행되지만 제대로 진행되었는지 다시 확인하는 것이 중요합니다. 성공하지 못하고 7일 이내에 성공적인 재시도가 없으면 Arbitrum 브릿지는 티켓을 폐기하며 귀하의 자산(하위 그래프, 지분, 위임 또는 큐레이션)은 손실되어 복구할 수 없습니다. Graph 코어 개발자는 이러한 상황을 감지하고 너무 늦기 전에 티켓을 교환하기 위해 모니터링 시스템을 갖추고 있지만 전송이 제 시간에 완료되도록 하는 것은 궁극적으로 귀하의 책임입니다. 거래를 확인하는 데 문제가 있는 경우 [이 양식]을 사용하여 문의하세요 (https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) 핵심 개발자들이 도와드릴 것입니다. 
+This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? @@ -36,41 +37,43 @@ If you have the L1 transaction hash (which you can find by looking at the recent ## 하위 그래프 전송 -### 내 서브그래프를 어떻게 이전하나요? +### How do I transfer my Subgraph? +To transfer your Subgraph, you will need to complete the following steps: + 1. 이더리움 메인넷에서 전송 시작 2. 확인을 위해 20분 정도 기다리세요 -3. Arbitrum에서 하위 그래프 전송 확인\* +3. Confirm Subgraph transfer on Arbitrum\* -4. Arbitrum에 하위 그래프 게시 완료 +4. Finish publishing Subgraph on Arbitrum 5. 쿼리 URL 업데이트(권장) -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### 어디에서 이전을 시작해야 합니까? -[Subgraph Studio](https://thegraph.com/studio/), [Explorer](https://thegraph.com/explorer) 또는 하위 그래프 세부정보 페이지에서 전송을 시작할 수 있습니다. 하위 그래프 세부 정보 페이지에서 "하위 그래프 전송" 버튼을 클릭하여 전송을 시작하세요. +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### 내 하위 그래프가 전송될 때까지 얼마나 기다려야 합니까? +### How long do I need to wait until my Subgraph is transferred 환승 시간은 약 20분 정도 소요됩니다. Arbitrum 브리지는 브리지 전송을 자동으로 완료하기 위해 백그라운드에서 작동하고 있습니다. 경우에 따라 가스 비용이 급증할 수 있으며 거래를 다시 확인해야 합니다. -### 내 하위 그래프를 L2로 전송한 후에도 계속 검색할 수 있나요? +### Will my Subgraph still be discoverable after I transfer it to L2? -귀하의 하위 그래프는 해당 하위 그래프가 게시된 네트워크에서만 검색 가능합니다. 예를 들어, 귀하의 하위 그래프가 Arbitrum One에 있는 경우 Arbitrum One의 Explorer에서만 찾을 수 있으며 Ethereum에서는 찾을 수 없습니다. 올바른 네트워크에 있는지 확인하려면 페이지 상단의 네트워크 전환기에서 Arbitrum One을 선택했는지 확인하세요. 이전 후 L1 하위 그래프는 더 이상 사용되지 않는 것으로 표시됩니다. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. 
Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### 내 하위 그래프를 전송하려면 게시해야 합니까? +### Does my Subgraph need to be published to transfer it? -하위 그래프 전송 도구를 활용하려면 하위 그래프가 이미 이더리움 메인넷에 게시되어 있어야 하며 하위 그래프를 소유한 지갑이 소유한 일부 큐레이션 신호가 있어야 합니다. 하위 그래프가 게시되지 않은 경우 Arbitrum One에 직접 게시하는 것이 좋습니다. 관련 가스 요금은 상당히 낮아집니다. 게시된 하위 그래프를 전송하고 싶지만 소유자 계정이 이에 대한 신호를 큐레이팅하지 않은 경우 해당 계정에서 소액(예: 1 GRT)을 신호로 보낼 수 있습니다. "자동 마이그레이션" 신호를 선택했는지 확인하세요. +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### Arbitrum으로 이전한 후 내 서브그래프의 이더리움 메인넷 버전은 어떻게 되나요? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -귀하의 하위 그래프를 Arbitrum으로 이전한 후에는 Ethereum 메인넷 버전이 더 이상 사용되지 않습니다. 48시간 이내에 쿼리 URL을 업데이트하는 것이 좋습니다. 그러나 타사 dapp 지원이 업데이트될 수 있도록 메인넷 URL이 작동하도록 유지하는 유예 기간이 있습니다. +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### 양도한 후에 Arbitrum에 다시 게시해야 합니까? @@ -78,21 +81,21 @@ If you have the L1 transaction hash (which you can find by looking at the recent ### Will my endpoint experience downtime while re-publishing? -It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### L2에서 Ethereum Ethereum 메인넷과 게시 및 버전 관리가 동일합니까? -Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### 내 하위 그래프의 큐레이션이 내 하위 그래프와 함께 이동하나요? +### Will my Subgraph's curation move with my Subgraph? -자동 마이그레이션 신호를 선택한 경우 자체 큐레이션의 100%가 하위 그래프와 함께 Arbitrum One으로 이동됩니다. 하위 그래프의 모든 큐레이션 신호는 전송 시 GRT로 변환되며, 큐레이션 신호에 해당하는 GRT는 L2 하위 그래프의 신호 생성에 사용됩니다. +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -다른 큐레이터는 GRT 일부를 인출할지, 아니면 L2로 전송하여 동일한 하위 그래프의 신호를 생성할지 선택할 수 있습니다. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. 
-### 이전 후 구독을 이더리움 메인넷으로 다시 이동할 수 있나요? +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -이전되면 이 하위 그래프의 Ethereum 메인넷 버전은 더 이상 사용되지 않습니다. 메인넷으로 다시 이동하려면 다시 메인넷에 재배포하고 게시해야 합니다. 그러나 인덱싱 보상은 결국 Arbitrum One에 전적으로 배포되므로 이더리움 메인넷으로 다시 이전하는 것은 권장되지 않습니다. +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### 전송을 완료하려면 브리지된 ETH가 필요한 이유는 무엇입니까? @@ -204,19 +207,19 @@ The tokens that are being undelegated are "locked" and therefore cannot be trans \*필요한 경우 - 즉, 계약 주소를 사용하고 있습니다. -### 내가 큐레이트한 하위 그래프가 L2로 이동했는지 어떻게 알 수 있나요? +### How will I know if the Subgraph I curated has moved to L2? -하위 세부정보 페이지를 보면 해당 하위 하위가 이전되었음을 알리는 배너가 표시됩니다. 메시지에 따라 큐레이션을 전송할 수 있습니다. 이동한 하위 그래프의 하위 그래프 세부정보 페이지에서도 이 정보를 찾을 수 있습니다. +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### 큐레이션을 L2로 옮기고 싶지 않으면 어떻게 되나요? -하위 그래프가 더 이상 사용되지 않으면 신호를 철회할 수 있는 옵션이 있습니다. 마찬가지로 하위 그래프가 L2로 이동한 경우 이더리움 메인넷에서 신호를 철회하거나 L2로 신호를 보낼 수 있습니다. +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### 내 큐레이션이 성공적으로 전송되었는지 어떻게 알 수 있나요? L2 전송 도구가 시작된 후 약 20분 후에 Explorer를 통해 신호 세부 정보에 액세스할 수 있습니다. -### 한 번에 두 개 이상의 하위 그래프에 대한 내 큐레이션을 전송할 수 있나요? +### Can I transfer my curation on more than one Subgraph at a time? 현재는 대량 전송 옵션이 없습니다. @@ -264,7 +267,7 @@ L2 전송 도구가 지분 전송을 완료하는 데 약 20분이 소요됩니 ### 지분을 양도하기 전에 Arbitrum에서 색인을 생성해야 합니까? -인덱싱을 설정하기 전에 먼저 지분을 효과적으로 이전할 수 있지만, L2의 하위 그래프에 할당하고 이를 인덱싱하고 POI를 제시할 때까지는 L2에서 어떤 보상도 청구할 수 없습니다. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### 내가 인덱싱 지분을 이동하기 전에 위임자가 자신의 위임을 이동할 수 있나요? From 5c51e4e86212894f933d2c3a93a9f9a6d4dfa872 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:27 -0500 Subject: [PATCH 0303/1789] New translations l2-transfer-tools-faq.mdx (Dutch) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/nl/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/nl/archived/arbitrum/l2-transfer-tools-faq.mdx index 2c7df434e45c..846ddd61273d 100644 --- a/website/src/pages/nl/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/nl/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ The exception is with smart contract wallets like multisigs: these are smart con De L2 Transfer Tools gebruiken Arbitrum's eigen mechanismen op berichten te sturen van L1 naar L2. Dit mechanisme heet een "retryable ticket" en is gebruikt door alle eigen token bruggen, inclusief de Arbitrum GRT brug. Je kunt meer lezen over retryable tickets in de [Arbiturm docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). 
-Wanneer je jouw activa (subgraph, inzet, delegatie of curatie) overdraagt naar L2, wordt er een bericht via de Arbitrum GRT-brug gestuurd dat een herhaalbaar ticket in L2 aanmaakt. De overdrachtstool bevat een bepaalde hoeveelheid ETH in de transactie, die gebruikt wordt om 1) te betalen voor de creatie van de ticket en 2) te betalen voor de gas voor de uitvoer van de ticket in L2. Omdat de gasprijzen kunnen variëren in de tijd tot het ticket gereed is om in L2 uit te voeren, is het mogelijk dat deze automatische uitvoerpoging mislukt. Als dat gebeurt, zal de Arbitrum-brug het herhaalbare ticket tot 7 dagen lang actief houden, en iedereen kan proberen het ticket te "inlossen" (wat een portemonnee met wat ETH dat naar Arbitrum is overgebracht, vereist). +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -Dit is wat we de "Bevestigen"-stap noemen in alle overdrachtstools - deze zal in de meeste gevallen automatisch worden uitgevoerd, omdat de automatische uitvoering meestal succesvol is, maar het is belangrijk dat je terugkeert om te controleren of het is gelukt. Als het niet lukt en er zijn geen succesvolle herhaalpogingen in 7 dagen, zal de Arbitrum-brug het ticket verwerpen, en je activa (subgraph, inzet, delegatie of curatie) zullen verloren gaan en kunnen niet worden hersteld. De kernontwikkelaars van The Graph hebben een bewakingssysteem om deze situaties te detecteren en proberen de tickets in te lossen voordat het te laat is, maar uiteindelijk ben jij verantwoordelijk om ervoor te zorgen dat je overdracht op tijd is voltooid. Als je problemen hebt met het bevestigen van je transactie, neem dan contact op via [dit formulier](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) en de kernontwikkelaars zullen er zijn om je te helpen. +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### Ik ben mijn delegatie/inzet/curatie overdracht begonnen en ik ben niet zeker of deze door is gekomen naar L2, hoe kan ik bevestigen dat deze correct is overgedragen? 
@@ -36,43 +36,43 @@ Als je de L1 transactie-hash hebt (die je kunt vinden door naar de recente trans ## Subgraph Overdracht -### Hoe verplaats ik mijn subgraphs? +### How do I transfer my Subgraph? -Om je subgraph te verplaatsen, moet je de volgende stappen volgen: +To transfer your Subgraph, you will need to complete the following steps: 1. Start de overdracht op het Ethereum mainnet 2. Wacht 20 minuten op bevestiging -3. Bevestig subgraph overdracht op Arbitrum\* +3. Confirm Subgraph transfer on Arbitrum\* -4. Maak het publiceren van subrgaph op Arbitrum af +4. Finish publishing Subgraph on Arbitrum 5. Update Query URL (aanbevolen) -\*Let op dat je de overdracht binnen 7 dagen moet bevestigen, anders kan je subgraph verloren gaan. In de meeste gevallen zal deze stap automatisch verlopen, maar een handmatige bevestiging kan nodig zijn als er een gasprijsstijging is op Arbitrum. Als er tijdens dit proces problemen zijn, zijn er bronnen beschikbaar om te helpen: neem contact op met de ondersteuning via support@thegraph.com of op [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### Waarvandaan moet ik mijn overdracht vanaf starten? -Je kan je overdracht starten vanaf de [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) of elke subgraph details pagina. Klik de "Transfer Subgraph" knop in de subgraph details pagina om de overdracht te starten. +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### Hoe lang moet ik wachten to mijn subrgaph overgedragen is +### How long do I need to wait until my Subgraph is transferred De overdracht duurt ongeveer 20 minuten. De Arbitrum brug werkt momenteel op de achtergrond om de brug overdracht automatisch te laten voltooien. In sommige gevallen kunnen gaskosten pieken en zul je de overdracht opnieuw moeten bevestigen. -### Is mijn subgraph nog te ontdekken nadat ik het naar L2 overgedragen heb? +### Will my Subgraph still be discoverable after I transfer it to L2? -Jouw subgraph zal alleen te ontdekken zijn op het netwerk waarnaar deze gepubliceerd is. Bijvoorbeeld, als jouw subgraph gepubliceerd is op Arbitrum One, dan kan je deze alleen vinden via de Explorer op Arbitrum One en zul je deze niet kunnen vinden op Ethereum. Zorg ervoor dat je Arbitrum One hebt geselecteerd in de netwerkschakelaar bovenaan de pagina om er zeker van te zijn dat je op het juiste netwerk bent.  Na de overdracht zal de L1 subgraph als verouderd worden weergegeven. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### Moet mijn subgraph gepubliceerd zijn om deze te kunnen overdragen? 
+### Does my Subgraph need to be published to transfer it? -Om gebruik te maken van de subgraph transfer tool, moet jouw subgraph al gepubliceerd zijn op het Ethereum mainnet en moet het enige curatie-signalen hebben die eigendom zijn van de wallet die de subgraph bezit. Als jouw subgraph nog niet is gepubliceerd, wordt het aanbevolen om het direct op Arbitrum One te publiceren - de bijbehorende gas fees zullen aanzienlijk lager zijn. Als je een gepubliceerde subgraph wilt overdragen maar het eigenaarsaccount heeft nog geen enkel curatie-signalen, kun je een klein bedrag signaleren (bv.: 1 GRT) vanaf dat account; zorg ervoor dat je "auto-migrating" signalen kiest. +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### Wat gebeurt er met de Ethereum mainnet versie van mijn subgraph nadat ik overdraag naar Arbitrum? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -Nadat je je subgraph naar Arbitrum hebt overgezet, zal de versie op het Ethereum mainnet als verouderd worden beschouwd. We raden aan om je query URL binnen 48 uur bij te werken. Er is echter een overgangsperiode waardoor je mainnet URL nog steeds werkt, zodat ondersteuning voor externe dapps kan worden bijgewerkt. +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### Nadat ik overgedragen heb, moet ik opnieuw publiceren op Arbitrum? @@ -80,21 +80,21 @@ Na de overdracht periode van 20 minuten, zul je de overdracht moeten bevestigen ### Zal mijn eindpunt downtime ervaren tijdens het opnieuw publiceren? -Het is onwaarschijnlijk, maar mogelijk om een korte downtime te ervaren afhankelijk van welke Indexers de subgraph op L1 ondersteunen en of zij blijven indexen totdat de subgraph volledig ondersteund wordt op L2. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### Is het publiceren en versiebeheer hetzelfde op L2 als Ethereum mainnet? -Ja. Selecteer Arbiturm One als jou gepubliceerde netwerk tijdens het publiceren in Subrgaph Studio. In de studio, de laatste endpoint die beschikbaar is zal wijzen naar de meest recentelijk bijgewerkte versie van de subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### Zal mijn subgraphs curatie mee verplaatsen met mijn subgraph? +### Will my Subgraph's curation move with my Subgraph? -Als je gekozen hebt voor auto-migrating signal, dan zal 100% van je eigen curatie mee verplaatsen met jouw subgraph naar Arbitrum One. 
Alle curatie signalen van de subgraph zullen worden omgezet naar GRT tijdens de overdracht en alle GRT die corresponderen met jouw curatie signaal zullen worden gebruikt om signalen te minten op de L2 subgraph. +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -Andere curators kunnen kiezen of ze hun deel van GRT kunnen opnemen, of overdragen naar L2 om signalen te minten op dezelfde subgraph. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### Kan ik nadat ik mijn subgraph overgedragen heb deze weer terug overdragen naar Ethereum mainnet? +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -Wanneer overgedragen, zal jouw Ethereum mainnet versie van deze subgraph als verouderd worden beschouwd. Als je terug wilt gaan naar het mainnet, zul je deze opnieuw moeten implementeren en publiceren op het mainnet. Echter, het wordt sterk afgeraden om terug naar het Ethereum mainnet over te dragen gezien index beloningen uiteindelijk op Arbitrum One zullen worden verdeeld. +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### Waarom heb ik gebrugd ETH nodig om mijn transactie te voltooien? @@ -206,19 +206,19 @@ Om je curatie over te dragen, moet je de volgende stappen volgen: \*indien nodig - bv. als je een contract adres gebruikt hebt. -### Hoe weet ik of de subgraph die ik cureer verplaatst is naar L2? +### How will I know if the Subgraph I curated has moved to L2? -Bij het bekijken van de details pagina van de subgraph zal er een banner verschijnen om je te laten weten dat deze subgraph is overgedragen. Je kunt de instructies volgen om je curatie over te zetten. Deze informatie is ook te vinden op de detailspagina van elke subgraph die is overgezet. +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### Wat als ik niet mijn curatie wil overdragen naar L2? -Wanneer een subgraph is verouderd, heb je de optie om je signaal terug te trekken. Op dezelfde manier, als een subgraph naar L2 is verhuisd, kun je ervoor kiezen om je signaal op het Ethereum-mainnet terug te trekken of het signaal naar L2 te sturen. +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### Hoe weet ik of mijn curatie succesvol is overgedragen? Signaal details zullen toegankelijk zijn via Explorer ongeveer 20 minuten nadat de L2 transfer tool is gestart. -### Kan ik mijn curatie overdragen op meer dan een subgraph per keer? +### Can I transfer my curation on more than one Subgraph at a time? Op dit moment is er geen bulk overdracht optie. 
@@ -266,7 +266,7 @@ Het duurt ongeveer 20 minuten voordat de L2-overdrachtstool je inzet heeft overg ### Moet ik indexeren op Arbitrum voordat ik mijn inzet overdraag? -Je kunt je inzet effectief overdragen voordat je indexing opzet, maar je zult geen beloningen kunnen claimen op L2 totdat je toewijst aan subgraphs op L2, ze indexeert en POI's presenteert. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### Kunnen Delegators hun delegatie overdragen voordat ik mijn index inzet overdraag? From 0a09908477f267f35ab600d03abd5282bc61569a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:28 -0500 Subject: [PATCH 0304/1789] New translations l2-transfer-tools-faq.mdx (Polish) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/pl/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/pl/archived/arbitrum/l2-transfer-tools-faq.mdx index c7f851bd8d87..50b904d5ef38 100644 --- a/website/src/pages/pl/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/pl/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ The exception is with smart contract wallets like multisigs: these are smart con Narzędzia przesyłania L2 używają natywnego mechanizmu Arbitrum do wysyłania wiadomości z L1 do L2. Mechanizm ten nazywany jest "ponowny bilet" i jest używany przez wszystkie natywne mosty tokenowe, w tym most Arbitrum GRT. Więcej informacji na temat "ponownych biletów" można znaleźć w [dokumentacji Arbitrum](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -Kiedy przenosisz swoje aktywa (subgraph, stake, delegowanie lub kuratorstwo) do L2, wiadomość jest wysyłana przez most Arbitrum GRT, który tworzy bilet z możliwością ponownej próby w L2. Narzędzie transferu zawiera pewną wartość ETH w transakcji, która jest wykorzystywana do 1) zapłaty za utworzenie biletu i 2) zapłaty za gaz do wykonania biletu w L2. Ponieważ jednak ceny gazu mogą się różnić w czasie do momentu, gdy bilet będzie gotowy do zrealizowania w L2, możliwe jest, że ta próba automatycznego wykonania zakończy się niepowodzeniem. Gdy tak się stanie, most Arbitrum utrzyma ten bilet aktywnym przez maksymalnie 7 dni, i każdy może ponowić próbę "zrealizowania" biletu (co wymaga portfela z pewną ilością ETH pzesłanego do Arbitrum). +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -Nazywamy to etapem "Potwierdzenia" we wszystkich narzędziach do przesyłania - w większości przypadków będzie on wykonywany automatycznie, ponieważ najczęściej kończy się sukcesem, ale ważne jest, aby sprawdzić i upewnić się, że się powiódł. 
Jeśli się nie powiedzie i w ciągu 7 dni nie będzie skutecznych ponownych prób, most Arbitrum odrzuci bilet, a twoje zasoby ( subgraf, stake, delegowanie lub kuratorstwo) zostaną utracone i nie będzie można ich odzyskać. Główni programiści Graph mają system monitorowania, który wykrywa takie sytuacje i próbuje zrealizować bilety, zanim będzie za późno, ale ostatecznie to ty jesteś odpowiedzialny za zapewnienie, że przesyłanie zostanie zakończone na czas. Jeśli masz problemy z potwierdzeniem transakcji, skontaktuj się z nami za pomocą [tego formularza] (https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms), a nasi deweloperzy udzielą Ci pomocy. +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? @@ -36,43 +36,43 @@ If you have the L1 transaction hash (which you can find by looking at the recent ## Subgraph Transfer -### Jak mogę przenieść swój subgraph? +### How do I transfer my Subgraph? -Aby przesłać swój subgraf, należy wykonać następujące kroki: +To transfer your Subgraph, you will need to complete the following steps: 1. Zainicjuj przesyłanie w sieci głównej Ethereum 2. Poczekaj 20 minut na potwierdzenie -3. Potwierdź przesyłanie subgrafu na Arbitrum\* +3. Confirm Subgraph transfer on Arbitrum\* -4. Zakończ publikowanie subgrafu na Arbitrum +4. Finish publishing Subgraph on Arbitrum 5. Zaktualizuj adres URL zapytania (zalecane) -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### Skąd powinienem zainicjować przesyłanie? -Przesyłanie można zainicjować ze strony [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) lub dowolnej strony zawierającej szczegóły subgrafu. Kliknij przycisk "Prześlij subgraf " na tej stronie, aby zainicjować proces przesyłania. 
+You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### Jak długo muszę czekać, aż mój subgraf zostanie przesłany +### How long do I need to wait until my Subgraph is transferred Przesyłanie trwa około 20 minut. Most Arbitrum działa w tle, automatycznie kończąc przesyłanie danych. W niektórych przypadkach koszty gazu mogą wzrosnąć i konieczne będzie ponowne potwierdzenie transakcji. -### Czy mój subgraf będzie nadal wykrywalny po przesłaniu go do L2? +### Will my Subgraph still be discoverable after I transfer it to L2? -Twój subgraf będzie można znaleźć tylko w sieci, w której został opublikowany. Na przykład, jeśli subgraf znajduje się w Arbitrum One, można go znaleźć tylko w Eksploratorze w Arbitrum One i nie będzie można go znaleźć w Ethereum. Upewnij się, że wybrałeś Arbitrum One w przełączniku sieci u góry strony i że jesteś we właściwej sieci. Po przesłaniu subgraf L1 będzie oznaczony jako nieaktualny. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### Czy mój subgraf musi zostać opublikowany, aby móc go przesłać? +### Does my Subgraph need to be published to transfer it? -Aby skorzystać z narzędzia do przesyłania subgrafów, musi on być już opublikowany w sieci głównej Ethereum i musi mieć jakiś sygnał kuratorski należący do portfela, który jest właścicielem subgrafu. Jeśli subgraf nie został opublikowany, zaleca się po prostu opublikowanie go bezpośrednio na Arbitrum One - związane z tym opłaty za gaz będą znacznie niższe. Jeśli chcesz przesłać opublikowany subgraf, ale konto właściciela nie ma na nim żadnego sygnału, możesz zasygnalizować niewielką kwotę (np. 1 GRT) z tego konta; upewnij się, że wybrałeś sygnał "automatycznej migracji". +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### Co stanie się z wersją mojego subgrafu w sieci głównej Ethereum po przesłaniu go do Arbitrum? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -Po przesłaniu subgrafu do Arbitrum, wersja głównej sieci Ethereum zostanie wycofana. Zalecamy zaktualizowanie adresu URL zapytania w ciągu 48 godzin. Istnieje jednak okres prolongaty, dzięki któremu adres URL sieci głównej będzie dalej funkcjonował, tak aby można było zaktualizować obsługę innych aplikacji. +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. 
However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### Czy po przesłaniu muszę również ponownie opublikować na Arbitrum? @@ -80,21 +80,21 @@ Po upływie 20-minutowego okna przesyłania konieczne będzie jego potwierdzenie ### Will my endpoint experience downtime while re-publishing? -It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### Czy publikowanie i wersjonowanie jest takie samo w L2 jak w sieci głównej Ethereum? -Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### Czy kurator mojego subgrafu będzie się przemieszczał wraz z moim subgrafem? +### Will my Subgraph's curation move with my Subgraph? -Jeśli wybrałeś automatyczną migrację sygnału, 100% twojego własnego kuratorstwa zostanie przeniesione wraz z subgrafem do Arbitrum One. Cały sygnał kuratorski subgrafu zostanie przekonwertowany na GRT w momencie transferu, a GRT odpowiadający sygnałowi kuratorskiemu zostanie użyty do zmintowania sygnału na subgrafie L2. +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -Inni kuratorzy mogą zdecydować, czy wycofać swoją część GRT, czy też przesłać ją do L2 w celu zmintowania sygnału na tym samym subgrafie. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### Czy mogę przenieść swój subgraf z powrotem do głównej sieci Ethereum po jego przesłaniu? +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -Po przesłaniu, wersja tego subgrafu w sieci głównej Ethereum zostanie wycofana. Jeśli chcesz ją przywrócić do sieci głównej, musisz ją ponownie wdrożyć i opublikować. Jednak przeniesienie z powrotem do sieci głównej Ethereum nie jest zalecane, ponieważ nagrody za indeksowanie zostaną całkowicie rozdzielone na Arbitrum One. +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### Dlaczego potrzebuję bridgowanego ETH do przesłania? @@ -206,19 +206,19 @@ Aby przesłać swoje kuratorstwo, należy wykonać następujące kroki: \*Jeżeli będzie wymagane - np. w przypadku korzystania z adresu kontraktu. -### Skąd będę wiedzieć, czy subgraf, którego jestem kuratorem, został przeniesiony do L2? +### How will I know if the Subgraph I curated has moved to L2? 
-Podczas przeglądania strony ze szczegółami subgrafu pojawi się baner informujący, że subgraf został przeniesiony. Możesz postępować zgodnie z wyświetlanymi instrukcjami, aby przesłać swoje kuratorstwo. Informacje te można również znaleźć na stronie ze szczegółami subgrafu każdego z tych, które zostały przeniesione. +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### Co jeśli nie chcę przenosić swojego kuratorstwa do L2? -Gdy subgraf jest nieaktualny, masz możliwość wycofania swojego sygnału. Podobnie, jeśli subgraf został przeniesiony do L2, możesz wycofać swój sygnał w sieci głównej Ethereum lub wysłać sygnał do L2. +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### Skąd mam wiedzieć, że moje kuratorstwo zostało pomyślnie przesłane? Szczegóły sygnału będą dostępne za pośrednictwem Eksploratora po upływie ok. 20 minut od uruchomienia narzędzia do przesyłania L2. -### Czy mogę przesłać swoje kuratorstwo do więcej niż jednego subgrafu na raz? +### Can I transfer my curation on more than one Subgraph at a time? Obecnie nie ma opcji zbiorczego przesyłania. @@ -266,7 +266,7 @@ Przesyłanie stake'a przez narzędzie do przesyłania L2 zajmie około 20 minut. ### Czy muszę indeksować na Arbitrum, zanim przekażę swój stake? -Możesz skutecznie przesłać swój stake przed skonfigurowaniem indeksowania, lecz nie będziesz w stanie odebrać żadnych nagród na L2, dopóki nie alokujesz do subgrafów na L2, nie zindeksujesz ich i nie podasz POI. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### Czy delegaci mogą przenieść swoje delegacje, zanim ja przeniosę swój indeksujący stake? From 7aa3d0a62306623be5cef11db26da7b21cf8e7b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:29 -0500 Subject: [PATCH 0305/1789] New translations l2-transfer-tools-faq.mdx (Portuguese) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/pt/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/pt/archived/arbitrum/l2-transfer-tools-faq.mdx index d542d643adc4..a821b0e0b588 100644 --- a/website/src/pages/pt/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/pt/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ A exceção é com carteiras de contrato inteligente como multisigs: estas são As Ferramentas de Transferência para L2 usam o mecanismo nativo do Arbitrum para enviar mensagens da L1 à L2. Este mecanismo é chamado de "retryable ticket" (bilhete retentável) e é usado por todos os bridges de tokens nativos, incluindo o bridge de GRT do Arbitrum. Leia mais na [documentação do Arbitrum](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -Ao transferir os seus ativos (subgraph, stake, delegação ou curadoria) à L2, é enviada uma mensagem através do bridge de GRT do Arbitrum, que cria um retryable ticket na L2. 
A ferramenta de transferência inclui um valor de ETH na transação, que é usado para pagar 1) pela criação do ticket e 2) pelo gas da execução do ticket na L2. Porém, devido à possível variação dos preços de gas no tempo até a execução do ticket na L2, esta tentativa de execução automática pode falhar. Se isto acontecer, o bridge do Arbitrum tentará manter o retryable ticket ativo por até 7 dias; assim, qualquer pessoa pode tentar novamente o "resgate" do ticket (que requer uma carteira com algum ETH em bridge ao Arbitrum). +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -Este é o passo de "Confirmação" em todas as ferramentas de transferência. Ele será executado automaticamente e com êxito na maioria dos casos, mas é importante verificar que ele foi executado. Se não tiver êxito na primeira execução e nem em quaisquer das novas tentativas dentro de 7 dias, o bridge do Arbitrum descartará o ticket, e os seus ativos (subgraph, stake, delegação ou curadoria) serão perdidos sem volta. Os programadores-núcleo do The Graph têm um sistema de monitoria para detectar estas situações e tentar resgatar os tickets antes que seja tarde, mas no final, a responsabilidade é sua de que a sua transferência complete a tempo. Caso haja problemas ao confirmar a sua transação, contacte-nos com [este formulário](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) e o núcleo tentará lhe ajudar. +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### Eu comecei a transferir a minha delegação/meu stake/minha curadoria e não tenho certeza se ela chegou à L2, como posso ter certeza de que a mesma foi transferida corretamente? @@ -36,43 +36,43 @@ Se tiver o hash de transação da L1 (confira as transações recentes na sua ca ## Transferência de Subgraph -### Como transfiro o meu subgraph? +### How do I transfer my Subgraph? -Para transferir o seu subgraph, complete os seguintes passos: +To transfer your Subgraph, you will need to complete the following steps: 1. Inicie a transferência na mainnet Ethereum 2. 
Espere 20 minutos pela confirmação -3. Confirme a transferência do subgraph no Arbitrum\* +3. Confirm Subgraph transfer on Arbitrum\* -4. Termine de editar o subgraph no Arbitrum +4. Finish publishing Subgraph on Arbitrum 5. Atualize o URL de Query (recomendado) -\*Você deve confirmar a transferência dentro de 7 dias, ou o seu subgraph poderá ser perdido. Na maioria dos casos, este passo será executado automaticamente, mas pode ser necessário confirmar manualmente caso haja um surto no preço de gas no Arbitrum. Caso haja quaisquer dificuldades neste processo, contacte o suporte em support@thegraph.com ou no [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### De onde devo iniciar a minha transferência? -Do [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) ou de qualquer página de detalhes de subgraph. Clique no botão "Transfer Subgraph" (Transferir Subgraph) na página de detalhes de subgraph para começar a transferência. +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### Quanto tempo devo esperar até que o meu subgraph seja transferido +### How long do I need to wait until my Subgraph is transferred A transferência leva cerca de 20 minutos. O bridge do Arbitrum trabalha em segundo plano para completar a transferência automaticamente. Às vezes, os custos de gas podem subir demais e a transação deverá ser confirmada novamente. -### O meu subgraph ainda poderá ser descoberto após ser transferido para a L2? +### Will my Subgraph still be discoverable after I transfer it to L2? -O seu subgraph só será descobrível na rede em qual foi editado. Por exemplo, se o seu subgraph estiver no Arbitrum One, então só poderá encontrá-lo no Explorer do Arbitrum One e não no Ethereum. Garanta que o Arbitrum One está selecionado no seletor de rede no topo da página para garantir que está na rede correta.  Após a transferência, o subgraph na L1 aparecerá como depreciado. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### O meu subgraph precisa ser editado para poder ser transferido? +### Does my Subgraph need to be published to transfer it? -Para aproveitar a ferramenta de transferência de subgraph, o seu subgraph já deve estar editado na mainnet Ethereum e deve ter algum sinal de curadoria em posse da carteira titular do subgraph. Se o seu subgraph não estiver editado, edite-o diretamente no Arbitrum One - as taxas de gas associadas serão bem menores. Se quiser transferir um subgraph editado, mas a conta titular não curou qualquer sinal nele, você pode sinalizar uma quantidade pequena (por ex. 
1 GRT) daquela conta; escolha o sinal "migração automática". +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### O que acontece com a versão da mainnet Ethereum do meu subgraph após eu transferi-lo ao Arbitrum? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -Após transferir o seu subgraph ao Arbitrum, a versão na mainnet Ethereum será depreciada. Recomendamos que atualize o seu URL de query em dentro de 28 horas. Porém, há um período que mantém o seu URL na mainnet em funcionamento, para que qualquer apoio de dapp de terceiros seja atualizado. +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### Após a transferência, preciso reeditar no Arbitrum? @@ -80,21 +80,21 @@ Após a janela de transferência de 20 minutos, confirme a transferência com um ### O meu endpoint estará fora do ar durante a reedição? -É improvável, mas é possível passar por um breve desligamento a depender de quais Indexadores apoiam o subgraph na L1, e de se eles continuarão a indexá-lo até o subgraph ter apoio total na L2. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### Editar e versionar na L2 funcionam da mesma forma que na mainnet Ethereum? -Sim. Selcione o Arbitrum One como a sua rede editada ao editar no Subgraph Studio. No Studio, o último endpoint disponível apontará à versão atualizada mais recente do subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### A curadoria do meu subgraph se mudará com o meu subgraph? +### Will my Subgraph's curation move with my Subgraph? -Caso tenha escolhido o sinal automigratório, 100% da sua própria curadoria se mudará ao Arbitrum One junto com o seu subgraph. Todo o sinal de curadoria do subgraph será convertido em GRT na hora da transferência, e o GRT correspondente ao seu sinal de curadoria será usado para mintar sinais no subgraph na L2. +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -Outros Curadores podem escolher se querem sacar a sua fração de GRT, ou também transferi-la à L2 para mintar sinais no mesmo subgraph. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### Posso devolver o meu subgraph à mainnet Ethereum após a transferência? 
+### Can I move my Subgraph back to Ethereum mainnet after I transfer? -Após a transferência, a versão da mainnet Ethereum deste subgraph será depreciada. Se quiser devolvê-lo à mainnet, será necessário relançá-lo e editá-lo de volta à mainnet. Porém, transferir de volta à mainnet do Ethereum é muito arriscado, já que as recompensas de indexação logo serão distribuidas apenas no Arbitrum One. +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### Por que preciso de ETH em bridge para completar a minha transferência? @@ -206,19 +206,19 @@ Para transferir a sua curadoria, complete os seguintes passos: \*Se necessário - por ex. se você usar um endereço de contrato. -### Como saberei se o subgraph que eu curei foi transferido para a L2? +### How will I know if the Subgraph I curated has moved to L2? -Ao visualizar a página de detalhes do subgraph, um banner notificará-lhe que este subgraph foi transferido. Siga o prompt para transferir a sua curadoria. Esta informação também aparece na página de detalhes de qualquer subgraph transferido. +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### E se eu não quiser mudar a minha curadoria para a L2? -Quando um subgraph é depreciado, há a opção de retirar o seu sinal. Desta forma, se um subgraph for movido à L2, dá para escolher retirar o seu sinal na mainnet Ethereum ou enviar o sinal à L2. +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### Como sei se a minha curadoria foi transferida com êxito? Os detalhes do sinal serão acessíveis através do Explorer cerca de 20 minutos após a ativação da ferramenta de transferência à L2. -### Posso transferir a minha curadoria em vários subgraphs de uma vez? +### Can I transfer my curation on more than one Subgraph at a time? Não há opção de transferências em conjunto no momento. @@ -266,7 +266,7 @@ A ferramenta de transferência à L2 finalizará a transferência do seu stake e ### Devo indexar no Arbitrum antes de transferir o meu stake? -Você pode transferir o seu stake antes de preparar a indexação, mas não terá como resgatar recompensas na L2 até alocar para subgraphs na L2, indexá-los, e apresentar POIs. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### Os Delegadores podem mudar a sua delegação antes que eu mude o meu stake de indexação? 
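The English FAQ text in the hunks above recommends updating a dapp's query URL within 48 hours of transferring a Subgraph to Arbitrum One. A minimal TypeScript sketch of that client-side change follows; it assumes a runtime with a global `fetch` (Node 18+ or a browser), and the API key, Subgraph ID, and gateway URL shape are placeholders, so the real endpoint should be copied from Subgraph Studio rather than from this sketch.

```typescript
// Placeholders throughout: substitute your API key, the Subgraph ID shown on
// Arbitrum One, and the exact gateway URL from Subgraph Studio.
const API_KEY = process.env.GRAPH_API_KEY ?? '<api-key>';
const SUBGRAPH_ID = '<subgraph-id-published-on-arbitrum-one>';

// Illustrative URL shape only; copy the real endpoint after the transfer.
const QUERY_URL = `https://gateway-arbitrum.network.thegraph.com/api/${API_KEY}/subgraphs/id/${SUBGRAPH_ID}`;

async function querySubgraph<T>(
  query: string,
  variables: Record<string, unknown> = {}
): Promise<T> {
  const res = await fetch(QUERY_URL, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query, variables }),
  });
  if (!res.ok) throw new Error(`Subgraph query failed: HTTP ${res.status}`);
  const { data, errors } = await res.json();
  if (errors) throw new Error(JSON.stringify(errors));
  return data as T;
}

// Usage with a placeholder entity type from your own schema:
querySubgraph('{ exampleEntities(first: 5) { id } }')
  .then((data) => console.log(data))
  .catch(console.error);
```

Keeping the endpoint in a single constant or environment variable makes the cutover a one-line change once the transfer is confirmed.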
From 4cb81aa71c126632a55fde6c1559887fdb31450c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:30 -0500 Subject: [PATCH 0306/1789] New translations l2-transfer-tools-faq.mdx (Russian) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/ru/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/ru/archived/arbitrum/l2-transfer-tools-faq.mdx index ebb1f3b1b165..4982403c1db2 100644 --- a/website/src/pages/ru/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/ru/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#type Инструменты переноса L2 используют встроенный механизм Arbitrum для передачи сообщений с L1 на L2. Этот механизм называется "retryable ticket", или "повторный тикет", и используется всеми собственными токен-мостами, включая мост Arbitrum GRT. Подробнее о повторном тикете можно прочитать в [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -Когда Вы переносите свои активы (субграф, стейк, делегирование или курирование) на L2, через мост Arbitrum GRT отправляется сообщение, которое создает повторный тикет на L2. Инструмент переноса включает в транзакцию некоторую стоимость ETH, которая используется для 1) оплаты создания тикета и 2) оплаты стоимости газа для выполнения тикета на L2. Однако, поскольку стоимость газа может измениться за время, пока тикет будет готов к исполнению на L2, возможна ситуация, когда попытка автоматического исполнения не удастся. В этом случае мост Arbitrum сохранит повторный тикет в течение 7 дней, и любой желающий может повторить попытку "погасить" тикет (для этого необходимо иметь кошелек с некоторым количеством ETH, подключенный к мосту Arbitrum). +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -Это так называемый шаг "Подтверждение" во всех инструментах переноса - в большинстве случаев он выполняется автоматически, поскольку автоисполнение чаще всего бывает успешным, но важно, чтобы Вы проверили, прошел ли он. Если он не исполнился и в течение 7 дней не будет повторных успешных попыток, мост Arbitrum отменит тикет, и Ваши активы (субграф, стейк, делегирование или курирование) будут потеряны и не смогут быть восстановлены. У разработчиков ядра The Graph есть система мониторинга, позволяющая выявлять такие ситуации и пытаться погасить тикеты, пока не стало слишком поздно, но в конечном итоге ответственность за своевременное завершение переноса лежит на Вас. Если у Вас возникли проблемы с подтверждением переноса, пожалуйста, свяжитесь с нами через [эту форму] (https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms), и разработчики ядра помогут Вам. 
+This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### Я начал передачу делегирования/стейка/курирования и не уверен, что она дошла до уровня L2. Как я могу убедиться, что она была передана правильно? @@ -36,43 +36,43 @@ If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#type ## Перенос субграфа -### Как мне перенести свой субграф? +### How do I transfer my Subgraph? -Чтобы перенести Ваш субграф, необходимо выполнить следующие действия: +To transfer your Subgraph, you will need to complete the following steps: 1. Инициировать перенос в основной сети Ethereum 2. Подождать 20 минут для получения подтверждения -3. Подтвердить перенос субграфа в Arbitrum\* +3. Confirm Subgraph transfer on Arbitrum\* -4. Завершить публикацию субграфа в Arbitrum +4. Finish publishing Subgraph on Arbitrum 5. Обновить URL-адрес запроса (рекомендуется) -\* Обратите внимание, что Вы должны подтвердить перенос в течение 7 дней, иначе Ваш субграф может быть потерян. В большинстве случаев этот шаг выполнится автоматически, но в случае скачка стоимости комиссии сети в Arbitrum может потребоваться ручное подтверждение. Если в ходе этого процесса возникнут какие-либо проблемы, Вам помогут: обратитесь в службу поддержки по адресу support@thegraph.com или в [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### С чего необходимо начать перенос? -Вы можете начать перенос со страницы [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) или любой другой страницы с информацией о субграфе. Для начала переноса нажмите кнопку "Перенести субграф" на странице сведений о субграфе. +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### Как долго мне необходимо ждать, пока мой субграф будет перенесен +### How long do I need to wait until my Subgraph is transferred Время переноса занимает около 20 минут. Мост Arbitrum работает в фоновом режиме, чтобы автоматически завершить перенос через мост. В некоторых случаях стоимость комиссии сети может повыситься, и Вам потребуется повторно подтвердить транзакцию. 
-### Будет ли мой субграф по-прежнему доступен для поиска после того, как я перенесу его на L2? +### Will my Subgraph still be discoverable after I transfer it to L2? -Ваш субграф можно будет найти только в той сети, в которой он опубликован. Например, если Ваш субграф находится в сети Arbitrum One, то Вы сможете найти его в Explorer только в сети Arbitrum One, и не сможете найти в сети Ethereum. Обратите внимание, что в переключателе сетей в верхней части страницы выбран Arbitrum One, чтобы убедиться, что Вы находитесь в правильной сети. После переноса субграф L1 будет отображаться как устаревший. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### Должен ли мой субграф быть опубликован, чтобы его можно было перенести? +### Does my Subgraph need to be published to transfer it? -Чтобы воспользоваться инструментом переноса субграфа, Ваш субграф должен быть уже опубликован в основной сети Ethereum и иметь какой-либо сигнал курирования, принадлежащий кошельку, которому принадлежит субграф. Если Ваш субграф не опубликован, рекомендуется просто опубликовать его непосредственно на Arbitrum One - связанная с этим стоимость комиссии сети будет значительно ниже. Если Вы хотите перенести опубликованный субграф, но на счете владельца нет сигнала курирования, Вы можете подать сигнал на небольшую сумму (например, 1 GRT) с этого счета; при этом обязательно выберите сигнал "автомиграция". +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### Что произойдет с версией моего субграфа в основной сети Ethereum после его переноса на Arbitrum? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -После переноса Вашего субграфа на Arbitrum версия, находящаяся на основной сети Ethereum станет устаревшей. Мы рекомендуем Вам обновить URL-адрес запроса в течение 48 часов. Однако существует отсрочка, в течение которой Ваш URL-адрес на основной сети будет функционировать, чтобы можно было обновить стороннюю поддержку децентрализованных приложений. +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### Нужно ли мне после переноса повторно опубликовываться на Arbitrum? @@ -80,21 +80,21 @@ If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#type ### Будет ли моя конечная точка простаивать при повторной публикации? 
-Это маловероятно, но возможно возникновение кратковременного простоя в зависимости от того, какие индексаторы поддерживают субграф на уровне L1 и продолжают ли они индексировать его до тех пор, пока субграф не будет полностью поддерживаться на уровне L2. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### Публикация и версионность на L2 такие же, как и в основной сети Ethereum? -Да. При публикации в Subgraph Studio выберите Arbitrum One в качестве публикуемой сети. В Studio будет доступна последняя конечная точка, которая указывает на последнюю обновленную версию субграфа. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### Будет ли курирование моего субграфа перемещено вместе с моим субграфом? +### Will my Subgraph's curation move with my Subgraph? -Если Вы выбрали автомиграцию сигнала, то 100% Вашего собственного кураторства переместится вместе с Вашим субграфом на Arbitrum One. Весь сигнал курирования субграфа будет преобразован в GRT в момент переноса, а GRT, соответствующий Вашему сигналу курирования, будет использован для обработки сигнала на субграфе L2. +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -Другие кураторы могут выбрать, снять ли им свою долю GRT, или также перевести ее в L2 для обработки сигнала на том же субграфе. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### Могу ли я переместить свой субграф обратно в основную сеть Ethereum после переноса? +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -После переноса Ваша версия данного субграфа в основной сети Ethereum станет устаревшей. Если Вы захотите вернуться в основную сеть, Вам нужно будет переразвернуть и снова опубликовать субграф в основной сети. Однако перенос обратно в основную сеть Ethereum настоятельно не рекомендуется, так как вознаграждения за индексирование в конечном итоге будут полностью распределяться на Arbitrum One. +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### Зачем мне необходимо использовать мост ETH для завершения переноса? @@ -206,19 +206,19 @@ If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#type \*При необходимости - т.е. если Вы используете контрактный адрес. -### Как я узнаю, что курируемый мною субграф перешел в L2? +### How will I know if the Subgraph I curated has moved to L2? -При просмотре страницы сведений о субграфе появится баннер, уведомляющий о том, что данный субграф был перенесен. Вы можете следовать подсказке, чтобы перенести свое курирование. Эту информацию можно также найти на странице сведений о субграфе любого перемещенного субграфа. 
+When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### Что делать, если я не хочу переносить свое курирование в L2? -Когда субграф устаревает, у Вас есть возможность отозвать свой сигнал. Аналогично, если субграф переместился в L2, Вы можете выбрать, отозвать свой сигнал из основной сети Ethereum или отправить его в L2. +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### Как я узнаю, что мое курирование успешно перенесено? Информация о сигнале будет доступна через Explorer примерно через 20 минут после запуска инструмента переноса L2. -### Можно ли перенести курирование на несколько субграфов одновременно? +### Can I transfer my curation on more than one Subgraph at a time? В настоящее время опция массового переноса отсутствует. @@ -266,7 +266,7 @@ If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#type ### Должен ли я индексироваться на Arbitrum перед тем, как перенести стейк? -Вы можете эффективно перенести свой стейк до начала настройки индексации, но Вы не сможете претендовать на вознаграждение на L2 до тех пор, пока не распределите субграфы на L2, не проиндексируете их, а также пока не представите POI. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### Могут ли делегаторы перемещать свои делегации до того, как я перемещу свой индексируемый стейк? From c4fd8ed126949312c6fa46a22cdf4e22fb0e4b89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:32 -0500 Subject: [PATCH 0307/1789] New translations l2-transfer-tools-faq.mdx (Swedish) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/sv/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/sv/archived/arbitrum/l2-transfer-tools-faq.mdx index b158efaed6ff..272fa705dfe5 100644 --- a/website/src/pages/sv/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/sv/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ The exception is with smart contract wallets like multisigs: these are smart con L2 Överföringsverktygen använder Arbitrums nativa mekanism för att skicka meddelanden från L1 till L2. Denna mekanism kallas en "retryable ticket" och används av alla nativa token-broar, inklusive Arbitrum GRT-broen. Du kan läsa mer om retryable tickets i [Arbitrums dokumentation](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -När du överför dina tillgångar (subgraf, insats, delegation eller kurering) till L2 skickas ett meddelande genom Arbitrum GRT-broen, vilket skapar en retryable ticket i L2. Överföringsverktyget inkluderar ett visst ETH-värde i transaktionen, som används för att 1) betala för att skapa biljetten och 2) betala för gasen för att utföra biljetten i L2. Men eftersom gaspriserna kan variera fram till att biljetten är redo att utföras i L2 kan det hända att detta automatiska utförsel försöket misslyckas. 
När det händer kommer Arbitrum-broen att behålla retryable ticket i livet i upp till 7 dagar, och vem som helst kan försöka "inlösa" biljetten (vilket kräver en plånbok med en viss mängd ETH broad till Arbitrum). +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -Detta är vad vi kallar "Bekräfta"-steget i alla överföringsverktygen - det kommer att köras automatiskt i de flesta fall, eftersom den automatiska utförandet oftast är framgångsrikt, men det är viktigt att du kontrollerar att det gick igenom. Om det inte lyckas och det inte finns några framgångsrika försök på 7 dagar kommer Arbitrum-broen att kasta biljetten, och dina tillgångar (subgraf, insats, delegation eller kurering) kommer att gå förlorade och kan inte återvinnas. The Graphs kärnutvecklare har ett övervakningssystem på plats för att upptäcka dessa situationer och försöka lösa biljetterna innan det är för sent, men det är i slutändan ditt ansvar att se till att din överföring är klar i tid. Om du har svårt att bekräfta din transaktion, kontakta oss via [detta formulär](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms), och kärnutvecklarna kommer att vara där för att hjälpa dig. +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### Jag startade min överföring av delegation/insats/kurering, och jag är osäker på om den lyckades komma till L2, hur kan jag bekräfta att den överfördes korrekt? @@ -36,43 +36,43 @@ Om du har L1-transaktionshashen (som du kan hitta genom att titta på de senaste ## Subgraf Överföring -### Hur överför jag min subgraf? +### How do I transfer my Subgraph? -För att överföra din subgraf måste du slutföra följande steg: +To transfer your Subgraph, you will need to complete the following steps: 1. Initiera överföringen på Ethereum huvudnätet 2. Vänta 20 minuter på bekräftelse -3. Bekräfta subgraföverföringen på Arbitrum\* +3. Confirm Subgraph transfer on Arbitrum\* -4. Slutför publiceringen av subgraf på Arbitrum +4. Finish publishing Subgraph on Arbitrum 5. 
Uppdatera fråge-URL (rekommenderas) -\*Observera att du måste bekräfta överföringen inom 7 dagar, annars kan din subgraf gå förlorad. I de flesta fall kommer detta steg att köras automatiskt, men en manuell bekräftelse kan behövas om det finns en gasprisspike på Arbitrum. Om det uppstår några problem under denna process finns det resurser för att hjälpa: kontakta support på support@thegraph.com eller på [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### Var ska jag initiera min överföring från? -Du kan initiera din överföring från [Subgraph Studio](https://thegraph.com/studio/), [Utforskaren,](https://thegraph.com/explorer) eller från vilken som helst subgrafsdetaljsida. Klicka på knappen "Överför subgraf" på subgrafsdetaljsidan för att starta överföringen. +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### Hur länge måste jag vänta tills min subgraf överförs? +### How long do I need to wait until my Subgraph is transferred Överföringstiden tar ungefär 20 minuter. Arbitrum-broen arbetar i bakgrunden för att slutföra broöverföringen automatiskt. I vissa fall kan gasavgifterna öka, och du måste bekräfta transaktionen igen. -### Kommer min subgraf fortfarande vara sökbar efter att jag har överfört den till L2? +### Will my Subgraph still be discoverable after I transfer it to L2? -Din subgraf kommer endast vara sökbar på det nätverk där den är publicerad. Till exempel, om din subgraf är på Arbitrum One, kan du endast hitta den i Utforskaren på Arbitrum One och kommer inte att kunna hitta den på Ethereum. Se till att du har valt Arbitrum One i nätverksväxlaren högst upp på sidan för att säkerställa att du är på rätt nätverk.  Efter överföringen kommer L1-subgrafen att visas som föråldrad. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### Måste min subgraf vara publicerad för att kunna överföra den? +### Does my Subgraph need to be published to transfer it? -För att dra nytta av subgraföverföringsverktyget måste din subgraf redan vara publicerad på Ethereum huvudnät och måste ha något kureringssignal ägt av plånboken som äger subgrafen. Om din subgraf inte är publicerad rekommenderas det att du helt enkelt publicerar direkt på Arbitrum One - de associerade gasavgifterna kommer att vara betydligt lägre. Om du vill överföra en publicerad subgraf men ägarplånboken inte har kuraterat något signal på den kan du signalera en liten mängd (t.ex. 1 GRT) från den plånboken; se till att välja "automigrering" signal. 
+To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### Vad händer med Ethereum huvudnätversionen av min subgraf efter att jag har överfört till Arbitrum? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -Efter att ha överfört din subgraf till Arbitrum kommer Ethereum huvudnätversionen att föråldras. Vi rekommenderar att du uppdaterar din fråge-URL inom 48 timmar. Det finns dock en nådperiod som gör att din huvudnät-URL fungerar så att stöd från tredjeparts-dappar kan uppdateras. +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### Behöver jag också publicera om på Arbitrum efter överföringen? @@ -80,21 +80,21 @@ Efter de 20 minuters överföringsfönstret måste du bekräfta överföringen m ### Kommer min endpunkt att ha nertid under ompubliceringen? -Det är osannolikt, men det är möjligt att uppleva en kort nertid beroende på vilka indexeringar som stöder subgrafen på L1 och om de fortsätter att indexera den tills subgrafen är fullt stödd på L2. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### Är publicering och versionering densamma på L2 som på Ethereum huvudnätet? -Ja. Välj Arbitrum One som ditt publicerade nätverk när du publicerar i Subgraph Studio. I studion kommer den senaste ändpunkt att vara tillgänglig, som pekar till den senaste uppdaterade versionen av subgrafen. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### Kommer min subgrafs kurering att flyttas med min subgraf? +### Will my Subgraph's curation move with my Subgraph? -Om du har valt automatisk migreringssignal kommer 100% av din egen kurering att flyttas med din subgraf till Arbitrum One. All subgrafens kureringssignal kommer att konverteras till GRT vid överföringstillfället, och GRT som motsvarar din kureringssignal kommer att användas för att prägla signal på L2-subgrafen. +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -Andra kuratorer kan välja att ta tillbaka sin del av GRT eller också överföra den till L2 för att prägla signal på samma subgraf. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### Kan jag flytta min subgraf tillbaka till Ethereum huvudnätet efter överföringen? 
+### Can I move my Subgraph back to Ethereum mainnet after I transfer? -När den är överförd kommer din Ethereum huvudnätversion av denna subgraf att vara föråldrad. Om du vill flytta tillbaka till huvudnätet måste du omimplementera och publicera på huvudnätet igen. Dock avråds starkt från att flytta tillbaka till Ethereum huvudnätet eftersom indexbelöningar till sist kommer att fördelas helt på Arbitrum One. +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### Varför behöver jag bridged ETH för att slutföra min överföring? @@ -206,19 +206,19 @@ För att överföra din kurering måste du följa följande steg: \*Om det behövs - dvs. du använder en kontraktadress. -### Hur vet jag om den subgraph jag har kuraterat har flyttats till L2? +### How will I know if the Subgraph I curated has moved to L2? -När du tittar på sidan med detaljer om subgraphen kommer en banner att meddela dig att denna subgraph har flyttats. Du kan följa uppmaningen för att överföra din kurering. Du kan också hitta denna information på sidan med detaljer om subgraphen som har flyttat. +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### Vad händer om jag inte vill flytta min kurering till L2? -När en subgraph avvecklas har du möjlighet att ta tillbaka din signal. På samma sätt, om en subgraph har flyttats till L2, kan du välja att ta tillbaka din signal på Ethereum huvudnät eller skicka signalen till L2. +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### Hur vet jag att min kurering har överförts framgångsrikt? Signaldetaljer kommer att vara tillgängliga via Explorer ungefär 20 minuter efter att L2-överföringsverktyget har initierats. -### Kan jag överföra min kurering på fler än en subgraph samtidigt? +### Can I transfer my curation on more than one Subgraph at a time? Det finns för närvarande ingen möjlighet till bulköverföring. @@ -266,7 +266,7 @@ Det tar ungefär 20 minuter för L2-överföringsverktyget att slutföra överf ### Måste jag indexer på Arbitrum innan jag överför min insats? -Du kan effektivt överföra din insats först innan du sätter upp indexering, men du kommer inte att kunna hämta några belöningar på L2 förrän du allokerar till subgrapher på L2, indexerar dem och presenterar POI. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### Kan Delegators flytta sin delegation innan jag flyttar min indexinsats? 
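The hunks above also note that a brief downtime is possible while Indexers pick the Subgraph up on L2, and that a grace period keeps the legacy mainnet URL working while third-party dapps update. Before retiring the old URL, the new endpoint can be sanity-checked with the standard `_meta` field that graph-node exposes on every Subgraph; in the sketch below the endpoint URL and the 60-second polling interval are assumptions, not values from the FAQ.

```typescript
// Poll the transferred Subgraph's new endpoint to confirm it is being served
// and that indexing is still making progress before retiring the old URL.
// The URL is a placeholder; copy the real one from Subgraph Studio.
const NEW_QUERY_URL = 'https://<new-gateway-endpoint-for-the-transferred-subgraph>';

interface Meta {
  hasIndexingErrors: boolean;
  block: { number: number };
}

async function fetchMeta(): Promise<Meta> {
  const res = await fetch(NEW_QUERY_URL, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query: '{ _meta { hasIndexingErrors block { number } } }' }),
  });
  if (!res.ok) throw new Error(`Endpoint not serving queries yet: HTTP ${res.status}`);
  const { data, errors } = await res.json();
  if (errors) throw new Error(JSON.stringify(errors));
  return data._meta as Meta;
}

async function healthCheck(): Promise<void> {
  const first = await fetchMeta();
  // Wait a minute, then check again to see whether the indexed block advances.
  await new Promise((resolve) => setTimeout(resolve, 60_000));
  const second = await fetchMeta();

  console.log('indexing errors:', second.hasIndexingErrors);
  console.log(
    'block progressed:',
    second.block.number > first.block.number,
    `(${first.block.number} -> ${second.block.number})`
  );
}

healthCheck().catch(console.error);
```

If `hasIndexingErrors` stays false and the indexed block number keeps advancing, Indexers are serving and syncing the Subgraph on its new endpoint.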
From 442b6a422a05a319ed9b0c313482a0e0f8b3857c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:33 -0500 Subject: [PATCH 0308/1789] New translations l2-transfer-tools-faq.mdx (Turkish) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/tr/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/tr/archived/arbitrum/l2-transfer-tools-faq.mdx index 709689c6ca55..e82d00f0809b 100644 --- a/website/src/pages/tr/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/tr/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ Bunun istisnası çoklu imza gibi akıllı sözleşme cüzdanlarıdır. Bunlar h L2 Transfer Araçları, Katman1'den Katman2'ye mesaj göndermek için Arbitrum'un yerel mekanizmasını kullanır. Bu mekanizma "yeniden denenebilir bilet" olarak adlandırılır ve Arbitrum GRT köprüsü de dahil olmak üzere tüm yerel token köprüleri tarafından kullanılır. Tekrar denenebilir biletler hakkında daha fazla bilgiyi [Arbitrum dökümantasyonunda] (https://docs.arbitrum.io/arbos/l1-to-l2-messaging) okuyabilirsiniz. -Varlıklarınızı (subgraph, stake, delegasyon veya kürasyon) Katman2'ye aktardığınızda, Katman2'de yeniden denenebilir bir bilet oluşturan Arbitrum GRT köprüsü aracılığıyla bir mesaj gönderilir. Transfer aracı, işlemde 1) bileti oluşturmak için ödeme yapmak ve 2) bileti Katman2'de yürütmek üzere gas için ödeme yapmak amacıyla kullanılan bir miktar ETH içerir. Ancak, bilet Katman2'de yürütülmeye hazır olana kadar geçen sürede gas fiyatları değişebileceğinden ötürü, bu otomatik yürütme girişiminin başarısız olma ihtimali vardır. Bu durumda, Arbitrum köprüsü yeniden denenebilir bileti 7 güne kadar kullanılabilir tutacaktır ve herkes bileti "kullanmayı" yeniden deneyebilir (bunun için Arbitrum'a köprülenmiş bir miktar ETH'ye sahip bir cüzdan gereklidir). +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -Bu, tüm transfer araçlarında "Onayla" adımı olarak adlandırdığımız adımdır - otomatik yürütme çoğu zaman başarılı olduğu için çoğu durumda otomatik olarak çalışacaktır, ancak başarılı bir şekilde gerçekleştiğinden emin olmak için tekrar kontrol etmeniz önemlidir. Başarılı olmazsa ve 7 gün içinde başarılı bir yeniden deneme gerçekleşmezse, Arbitrum köprüsü bileti iptal edecek ve varlıklarınız (subgraph, stake, delegasyon veya kürasyon) kaybolacak ve kurtarılamayacaktır. Graph çekirdek geliştiricileri bu durumları tespit etmek ve çok geç olmadan biletleri kurtarmaya çalışmak için bir izleme sistemine sahiptir, ancak transferinizin zamanında tamamlanmasını sağlamak nihayetinde sizin sorumluluğunuzdadır. 
İşleminizi onaylamakta sorun yaşıyorsanız, lütfen [bu formu](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) kullanarak bize ulaşın; çekirdek geliştiriciler size yardımcı olacaktır. +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### Delegasyon/stake/kürasyon transferimi başlattım ve Katman2'ye ulaşıp ulaşmadığından emin değilim, doğru şekilde transfer edilip edilmediğini nasıl teyit edebilirim? @@ -36,43 +36,43 @@ Katman1 işlem hash'ına sahipseniz (cüzdanınızdaki son işlemlere bakarak bu ## Subgraph Transferi -### Subgraph'ımı nasıl transfer edebilirim? +### How do I transfer my Subgraph? -Subgraph'ınızı transfer etmek için aşağıdaki adımları tamamlamanız gerekecektir: +To transfer your Subgraph, you will need to complete the following steps: 1. Ethereum ana ağında transferi başlatın 2. Onaylanması için 20 dakika bekleyin -3. Arbitrum\* üzerinde subgraph transferini onaylayın +3. Confirm Subgraph transfer on Arbitrum\* -4. Arbitrum üzerinde subgraph'ı yayınlamayı bitirin +4. Finish publishing Subgraph on Arbitrum 5. Sorgu URL'sini Güncelle (önerilir) -\*Transferi 7 gün içinde onaylamanız gerektiğini unutmayın, aksi takdirde subgraph'ınız kaybolabilir. Çoğunlukla, bu adım otomatik olarak çalışacaktır, ancak Arbitrum'da gas fiyatlarında bir artış varsa manuel bir onay gerekebilir. Bu süreç sırasında herhangi bir sorun yaşanırsa, yardımcı olacak kaynaklar olacaktır: support@thegraph.com veya [Discord](https://discord.gg/graphprotocol) üzerinden destek ile iletişime geçin. +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### Transferimi nereden başlatmalıyım? -Transferinizi [Subgraph Stüdyo](https://thegraph.com/studio/), [Gezgin](https://thegraph.com/explorer) veya herhangi bir subgraph ayrıntıları sayfasından başlatabilirsiniz. Transferi başlatmak için subgraph ayrıntıları sayfasındaki "Subgraph Transfer" butonuna tıklayın. +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### Subgraph'ım transfer edilene kadar ne kadar beklemem gerekir? +### How long do I need to wait until my Subgraph is transferred Transfer süresi yaklaşık 20 dakika alır. 
Arbitrum köprüsü, köprü transferini otomatik olarak tamamlamak için arka planda çalışmaktadır. Bazı durumlarda gaz maliyetleri artabilir ve işlemi tekrar onaylamanız gerekebilir. -### Katman2'ye transfer ettikten sonra subgraph'ım hala keşfedilebilir olacak mı? +### Will my Subgraph still be discoverable after I transfer it to L2? -Subgraph'ınız yalnızca yayınlandığı ağda keşfedilebilir olacaktır. Örneğin, subgraph'ınız Arbitrum One üzerindeyse, onu yalnızca Arbitrum One üzerindeki Gezgin'de bulabilirsiniz, Ethereum'da aradığınızda bulamazsınız. Doğru ağda olduğunuzdan emin olmak için lütfen sayfanın üst kısmındaki ağ değiştiricisinde Arbitrum One'ın seçili olduğundan emin olun. Transferden sonra, Katman1 subgraph'ı kullanımdan kaldırılmış olarak görünecektir. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### Transfer etmek için subgraph'ımın yayınlanmış olması gerekiyor mu? +### Does my Subgraph need to be published to transfer it? -Subgraph transfer aracından yararlanmak için, subgraph'ınızın Ethereum ana ağı'nda yayınlanmış olması ve subgraph'ın sahibi olan cüzdanın, belirli miktarda kürasyon sinyaline sahip olması gerekmektedir. Eğer subgraph'ınız yayınlanmamışsa, doğrudan Arbitrum One'da yayınlamanız önerilir böylece ilgili gas ücretleri önemli ölçüde daha düşük olacaktır. Yayınlanmış bir subgraph'ı transfer etmek istiyorsanız, ancak sahip hesap üzerinde herhangi bir sinyal kürasyonu yapılmamışsa, bu hesaptan küçük bir miktar (örneğin 1 GRT) sinyal verebilirsiniz; "otomatik geçiş" sinyalini seçtiğinizden emin olun. +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### Arbitrum'a transfer olduktan sonra subgraph'ımın Ethereum ana ağ versiyonuna ne olur? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -Subgraph'ınızı Arbitrum'a transfer ettikten sonra, Ethereum ana ağ versiyonu kullanımdan kaldırılacaktır. Sorgu URL'nizi 48 saat içinde güncellemenizi öneririz. Bununla birlikte, herhangi bir üçüncü taraf merkeziyetsiz uygulama desteğinin güncellenebilmesi için ana ağ URL'nizin çalışmasını sağlayan bir ödemesiz dönem vardır. +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### Transferi tamamladıktan sonra Arbitrum'da da yeniden yayınlamam gerekiyor mu? @@ -80,21 +80,21 @@ Subgraph'ınızı Arbitrum'a transfer ettikten sonra, Ethereum ana ağ versiyonu ### Yeniden yayınlama sırasında uç noktam kesinti yaşar mı? 
-Olası değildir, fakat Katman1'de hangi İndeksleyicilerin subgraph'ı desteklediğine ve subgraph Katman2'de tam olarak desteklenene kadar indekslemeye devam edip etmediklerine bağlı olarak kısa bir kesinti yaşanması mümkündür. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### Yayınlama ve sürüm oluşturma Katman2'de Ethereum ana ağı ile aynı mı? -Evet. Subgraph Stüdyo'da yayınlarken, yayınlanan ağınız olarak Arbitrum One'ı seçin. Stüdyo'da, subgprah'ın en son güncellenmiş sürümüne yönlendiren en son uç nokta mevcut olacaktır. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### Subgraph'ımın kürasyonu subgraph'ımla birlikte hareket edecek mi? +### Will my Subgraph's curation move with my Subgraph? -Otomatik geçiş sinyalini seçtiyseniz, kendi kürasyonunuzun %100'ü subgraph'ınızla birlikte Arbitrum One'a taşınacaktır. Subgraph'ın tüm kürasyon sinyali, aktarım sırasında GRT'ye dönüştürülecek ve kürasyon sinyalinize karşılık gelen GRT, Katman2 subgraph'ında sinyal basmak için kullanılacaktır. +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -Diğer Küratörler kendilerne ait GRT miktarını geri çekmeyi ya da aynı subgraph üzerinde sinyal basmak için Katman2'ye transfer etmeyi seçebilirler. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### Transferden sonra subgraph'ımı Ethereum ana ağı'na geri taşıyabilir miyim? +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -Transfer edildikten sonra, bu subgraph'ınızın Ethereum ana ağı sürümü kullanımdan kaldırılacaktır. Ana ağa geri dönmek isterseniz, ana ağa yeniden dağıtmanız ve geri yayınlamanız gerekecektir. Öte yandan, indeksleme ödülleri eninde sonunda tamamen Arbitrum One üzerinde dağıtılacağından, Ethereum ana ağına geri transfer kesinlikle önerilmez. +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### Transferimi tamamlamak için neden köprülenmiş ETH'ye ihtiyacım var? @@ -206,19 +206,19 @@ Kürasyonunuzu transfer etmek için aşağıdaki adımları tamamlamanız gereke \*Gerekliyse - yani bir sözleşme adresi kullanıyorsanız. -### Küratörlüğünü yaptığım subgraph'ın Katman2'ye taşınıp taşınmadığını nasıl bileceğim? +### How will I know if the Subgraph I curated has moved to L2? -Subgraph ayrıntıları sayfasını görüntülerken, bir afiş size bu subgraph'ın transfer edildiğini bildirecektir. Kürasyonunuzu transfer etmek için komut istemini takip edebilirsiniz. Bu bilgiyi taşınan herhangi bir subgraph'ın subgraph ayrıntıları sayfasında da bulabilirsiniz. +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. 
You can also find this information on the Subgraph details page of any Subgraph that has moved. ### Kürasyonumu Katman2'ye taşımak istemezsem ne olur? -Bir subgraph kullanımdan kaldırıldığında sinyalinizi geri çekme opsiyonu bulunmaktadır. Benzer şekilde, bir subgraph Katman2'ye taşındıysa, sinyalinizi Ethereum ana ağı'nda geri çekmeyi veya sinyali Katman2'ye göndermeyi seçebilirsiniz. +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### Kürasyonumun başarıyla transfer edildiğini nasıl bilebilirim? Sinyal ayrıntıları, Katman2 transfer aracı başlatıldıktan yaklaşık 20 dakika sonra Gezgin üzerinden erişilebilir olacaktır. -### Kürasyonumu aynı anda birden fazla subgraph'a transfer edebilir miyim? +### Can I transfer my curation on more than one Subgraph at a time? Şu anda toplu transfer seçeneği bulunmamaktadır. @@ -266,7 +266,7 @@ Katman2 transfer aracının stake'inizi transfer etmeyi tamamlaması yaklaşık ### Stake'imi transfer etmeden önce Arbitrum'da indekslemem gerekiyor mu? -İndekslemeyi oluşturmadan önce hissenizi etkin bir şekilde aktarabilirsiniz, ancak Katman2'deki subgraph'lara tahsis edene, bunları indeksleyene ve POI'leri sunana kadar Katman2'de herhangi bir ödül talep edemezsiniz. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### Ben indeksleme stake'imi taşımadan önce Delegatörler delegasyonlarını taşıyabilir mi? From 879dac77ecfb643e8907dbd21a89ba5702220ea8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:34 -0500 Subject: [PATCH 0309/1789] New translations l2-transfer-tools-faq.mdx (Ukrainian) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/uk/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/uk/archived/arbitrum/l2-transfer-tools-faq.mdx index 612b61fd0515..7edde3d0cbcd 100644 --- a/website/src/pages/uk/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/uk/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ The exception is with smart contract wallets like multisigs: these are smart con The L2 Transfer Tools use Arbitrum’s native mechanism to send messages from L1 to L2. This mechanism is called a “retryable ticket” and is used by all native token bridges, including the Arbitrum GRT bridge. You can read more about retryable tickets in the [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -When you transfer your assets (subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). 
+When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? @@ -36,43 +36,43 @@ If you have the L1 transaction hash (which you can find by looking at the recent ## Subgraph Transfer -### How do I transfer my subgraph? +### How do I transfer my Subgraph? -To transfer your subgraph, you will need to complete the following steps: +To transfer your Subgraph, you will need to complete the following steps: 1. Initiate the transfer on Ethereum mainnet 2. Wait 20 minutes for confirmation -3. Confirm subgraph transfer on Arbitrum\* +3. Confirm Subgraph transfer on Arbitrum\* -4. Finish publishing subgraph on Arbitrum +4. Finish publishing Subgraph on Arbitrum 5. Update Query URL (recommended) -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. 
If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### Where should I initiate my transfer from? -You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### How long do I need to wait until my subgraph is transferred +### How long do I need to wait until my Subgraph is transferred The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. -### Will my subgraph still be discoverable after I transfer it to L2? +### Will my Subgraph still be discoverable after I transfer it to L2? -Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### Does my subgraph need to be published to transfer it? +### Does my Subgraph need to be published to transfer it? -To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. 
If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### After I transfer, do I also need to re-publish on Arbitrum? @@ -80,21 +80,21 @@ After the 20 minute transfer window, you will need to confirm the transfer with ### Will my endpoint experience downtime while re-publishing? -It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? -Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### Will my subgraph's curation move with my subgraph? +### Will my Subgraph's curation move with my Subgraph? -If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### Can I move my subgraph back to Ethereum mainnet after I transfer? +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. 
+Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### Why do I need bridged ETH to complete my transfer? @@ -206,19 +206,19 @@ To transfer your curation, you will need to complete the following steps: \*If necessary - i.e. you are using a contract address. -### How will I know if the subgraph I curated has moved to L2? +### How will I know if the Subgraph I curated has moved to L2? -When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### What if I do not wish to move my curation to L2? -When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### How do I know my curation successfully transferred? Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. -### Can I transfer my curation on more than one subgraph at a time? +### Can I transfer my curation on more than one Subgraph at a time? There is no bulk transfer option at this time. @@ -266,7 +266,7 @@ It will take approximately 20 minutes for the L2 transfer tool to complete trans ### Do I have to index on Arbitrum before I transfer my stake? -You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### Can Delegators move their delegation before I move my indexing stake? 
From 872e9ba172dfbfbb1204ea9eed219543f3eeb0be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:35 -0500 Subject: [PATCH 0310/1789] New translations l2-transfer-tools-faq.mdx (Chinese Simplified) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/zh/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/zh/archived/arbitrum/l2-transfer-tools-faq.mdx index 5ee091bbc5a3..236d5734f793 100644 --- a/website/src/pages/zh/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/zh/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#type L2 传输工具使用 Arbitrum 的原生机制将信息从 L1 发送至 L2。这种机制被称为 "retryable ticket,所有本地令牌网桥都使用这种机制,包括Arbitrum GRT网桥。您可以在[Arbitrum文档](https://docs.arbitrum.io/arbos/l1-to-l2-messaging)中阅读更多关于retryable ticket的信息。 -当您将您的资产(子图、股权、委托)转移到 L2 时,会通过 Arbitrum GRT 桥接器发送一条信息,该桥接器会在 L2 中创建一个可retryable ticket。转移工具在交易中包含一些 ETH ,用于:1)支付创建票据的费用;2)支付在 L2 中执行票据的气体费用。但是,在票据准备好在 L2 中执行之前,gas价格可能会发生变化,因此自动执行尝试可能会失败。当这种情况发生时,Arbitrum 桥接器会将retryable ticket保留最多 7 天,任何人都可以重试 "赎回 "票据(这需要一个与 Arbitrum 桥接了一些 ETH 的钱包)。 +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -这就是我们在所有传输工具中所说的 "确认 "步骤--在大多数情况下,它会自动运行,因为自动执行通常都会成功,但重要的是,您要回过头来检查,以确保它成功了。如果没有成功,并且在 7 天内没有成功的重试,Arbitrum 桥接器将丢弃该票据,您的资产(子图、股权、委托或管理)将丢失且无法恢复。The Graph核心开发人员有一个监控系统来检测这些情况,并尝试在为时已晚之前赎回门票,但确保您的转让及时完成最终还是您的责任。如果您在确认交易时遇到困难,请使用[此表单](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms)联系我们,核心开发人员将为您提供帮助。 +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### 我开始了我的委托/质押/策展转移,但不确定是否成功转移到了 Layer 2(L2),我如何确认转移是否正确进行了? @@ -36,43 +36,43 @@ L2 传输工具使用 Arbitrum 的原生机制将信息从 L1 发送至 L2。这 ## 子图转移 -### 如何转移我的子图? +### How do I transfer my Subgraph? -要转移您的子图,您需要完成以下步骤: +To transfer your Subgraph, you will need to complete the following steps: 1. 在以太坊主网上启动转移过程 2. 等待20分钟进行确认 -3. 在Arbitrum上确认子图转移\* +3. 
Confirm Subgraph transfer on Arbitrum\* -4. 在Arbitrum上完成子图发布 +4. Finish publishing Subgraph on Arbitrum 5. 更新查询URL(推荐) -\*请注意,您必须在7天内确认转移,否则您的子图可能会丢失。在大多数情况下,此步骤将自动运行,但如果Arbitrum的燃气价格飙升,则可能需要手动确认。如果在此过程中遇到任何问题,我们将提供帮助:请通过support@thegraph.com或[Discord](https://discord.gg/graphprotocol)与我们联系。 +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### 我应该从哪里发起转移? -您可以从[Subgraph Studio](https://thegraph.com/studio/), [Explorer](https://thegraph.com/explorer) 或任何子图详细信息页面发起转移。在子图详细信息页面中,点击“Transfer Subgraph”按钮开始转移。 +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### 我需要等多久才能完成子图转移? +### How long do I need to wait until my Subgraph is transferred 转移时间大约需要20分钟。Arbitrum跨链桥在后台工作,自动完成桥接转移。在某些情况下,燃气费用可能会上涨,您需要再次确认交易。 -### 在我将子图转移到L2之后,它还能被发现吗? +### Will my Subgraph still be discoverable after I transfer it to L2? -您的子图只能在其发布所在的网络上被发现。例如,如果您的子图在Arbitrum One上,那么您只能在Arbitrum One的Explorer中找到它,而无法在以太坊上找到它。请确保您已在页面顶部的网络切换器中选择了Arbitrum One,以确保您位于正确的网络上。转移后,L1子图将显示为已弃用。 +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### 我的子图是否需要发布才能转移? +### Does my Subgraph need to be published to transfer it? -要使用子图转移工具,您的子图必须已经发布到以太坊主网上,并且拥有子图的钱包必须拥有一定的策划信号。如果您的子图尚未发布,建议您直接在Arbitrum One上进行发布-相关的燃气费用将大大降低。如果您想转移已发布的子图,但拥有该子图的所有者账户尚未对其进行任何策划信号的策展,您可以从该账户中发送一小笔金额(例如1 GRT)进行信号,确保选择“自动迁移”信号。 +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### 我将我的子图转移到Arbitrum后,以太坊主网版本的子图会发生什么? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -将子图转移到Arbitrum后,以太坊主网版本的子图将被弃用。我们建议您在48小时内更新查询URL。但是,我们已经设置了一个宽限期使您的主网URL继续可用,以便更新任何第三方dapp的支持。 +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### 转移后,我还需要在Arbitrum上重新发布吗? @@ -80,21 +80,21 @@ L2 传输工具使用 Arbitrum 的原生机制将信息从 L1 发送至 L2。这 ### 重新发布期间,您的端点会经历停机时间吗? 
-这是不太可能的,但取决于在 L1 上支持子图的索引人以及他们是否一直进行索引直到子图在 L2 上完全受支持,可能会经历短暂的停机时间。 +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### L2上的发布和版本控制与以太坊主网上相同吗? -是的。请确保在Subgraph Studio中选择Arbitrum One作为您的发布网络。在Studio中,将提供最新的端点,指向子图的最新更新版本。 +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### 我的子图的策展是否会随着子图一起转移? +### Will my Subgraph's curation move with my Subgraph? -如果您选择了自动迁移信号,您自己的全部策展将与子图一起转移到Arbitrum One。在转移时,所有子图的策展信号将转换为GRT,并且与您的策展信号相对应的GRT将用于在L2子图上生成信号。 +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -其他策展人可以选择是否撤回他们的一部分GRT,或者将其转移到L2上,在同一子图上生成信号。 +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### 在将子图转移到Arbitrum后,我能否将其转回以太坊主网? +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -一旦转移,您的以太坊主网版本的子图将被弃用。如果您想转回主网,您需要重新部署并发布到主网。然而,强烈不建议再次转移到以太坊主网,因为索引奖励最终将完全在Arbitrum One上分发。 +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### 为什么在完成转移时需要桥接ETH? @@ -206,19 +206,19 @@ L2 转移工具将始终将您的委托转移到您先前委托的同一索引 \*如果需要-即您正在使用合约地址。 -### 我如何知道我策展的子图已经转移到L2? +### How will I know if the Subgraph I curated has moved to L2? -在查看子图详细信息页面时,将显示一个横幅,通知您该子图已转移。您可以按照提示进行策展转移。您还可以在已转移的任何子图的子图详细信息页面上找到此信息。 +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### 如果我不希望将我的策展转移到L2怎么办? -当子图被弃用时,您可以选择撤回您的信号。同样,如果子图转移到L2,您可以选择在以太坊主网上撤回信号,或将信号发送到L2。 +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### 如何知道我的策展是否成功转移? 信号详细信息将在大约20分钟后通过Exploer可访问。 -### 我可以一次在多个子图上转移我的策展吗? +### Can I transfer my curation on more than one Subgraph at a time? 目前没有批量转移选项。 @@ -266,7 +266,7 @@ L2 转移工具将始终将您的委托转移到您先前委托的同一索引 ### 在转移质押之前,我是否需要在 Arbitrum 上进行索引(indexing)? -你可以在转移质押之前先有效地进行转移,但在你在 L2 上分配和索引子图之前,你将无法领取 L2 上的任何奖励。要在 L2 上领取奖励,你需要在 L2 上分配子图、对其进行索引并提供 POI。 +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### 在我转移索引质押之前,委托人是否可以转移他们的委托? 
From 6990f53b5a68a5388e61454f9585e40d37a60dd8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:36 -0500 Subject: [PATCH 0311/1789] New translations l2-transfer-tools-faq.mdx (Urdu (Pakistan)) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/ur/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/ur/archived/arbitrum/l2-transfer-tools-faq.mdx index ce46b35ce79b..466aa1cc8f3f 100644 --- a/website/src/pages/ur/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/ur/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -25,9 +25,9 @@ The exception is with smart contract wallets like multisigs: these are smart con L2 ٹرانسفر ٹول L1 کو پیغامات بھیجنے کے لیے Arbitrum کا مقامی طریقہ استعمال کرتے ہیں۔ اس طریقہ کار کو "ریٹری ایبل ٹکٹ" کہا جاتا ہے اور اس کا استعمال تمام مقامی ٹوکن برجز بشمول Arbitrum GRT بریج کے ذریعے کیا جاتا ہے۔ آپ دوبارہ قابل کوشش ٹکٹوں کے بارے میں مزید پڑھ سکتے ہیں [Arbitrum دستاویزات](https://docs.arbitrum.io/arbos/l1-to-l2-messaging) میں۔ -جب آپ اپنے اثاثے (سب گراف، سٹیک، ڈیلیگیشن یا کیوریشن) L2 پر منتقل کرتے ہیں، تو Arbitrum GRT بریج کے ذریعے ایک پیغام بھیجا جاتا ہے جو L2 میں دوبارہ ریٹری ایبل ٹکٹ بناتا ہے۔ ٹرانسفر ٹول میں ٹرانزیکشن میں کچھ ایتھیریم ویلیو شامل ہوتی ہے، جس کا استعمال 1) ٹکٹ بنانے کے لیے ادائیگی اور 2) L2 میں ٹکٹ کو انجام دینے کے لیے گیس کی ادائیگی کے لیے کیا جاتا ہے۔ تاہم، چونکہ L2 میں ٹکٹ کے مکمل ہونے کے لیے تیار ہونے تک گیس کی قیمتیں مختلف ہو سکتی ہیں، اس لیے یہ ممکن ہے کہ خودکار طریقے سے عمل درآمد کی یہ کوشش ناکام ہو جائے۔ جب ایسا ہوتا ہے، تو Arbitrum بریج دوبارہ کوشش کے قابل ٹکٹ کو 7 دنوں تک زندہ رکھے گا، اور کوئی بھی ٹکٹ کو "چھڑانے" کی دوبارہ کوشش کر سکتا ہے (جس کے لیے Arbitrum کے لیے کچھ ایتھیریم والے والیٹ کی ضرورت ہوتی ہے)۔ +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -اسے ہم منتقلی کے تمام ٹولز میں "تصدیق" مرحلہ کہتے ہیں - یہ زیادہ تر معاملات میں خود بخود چلے گا، کیونکہ خود کار طریقے سے عمل اکثر کامیاب ہوتا ہے، لیکن یہ ضروری ہے کہ آپ اس بات کو یقینی بنانے کے لیے دوبارہ چیک کریں۔ اگر یہ کامیاب نہیں ہوتا ہے اور 7 دنوں میں کوئی کامیاب کوشش نہیں ہوتی ہے، تو Arbitrum بریج ٹکٹ کو رد کر دے گا، اور آپ کے اثاثے (سب گراف، سٹیک، ڈیلیگیشن یا کیوریشن) ضائع ہو جائیں گے اور بازیافت نہیں ہو سکیں گے۔ گراف کور ڈویلپرز کے پاس ان حالات کا پتہ لگانے کے لیے ایک نگرانی کا نظام موجود ہے اور بہت دیر ہونے سے پہلے ٹکٹوں کو چھڑانے کی کوشش کریں، لیکن یہ یقینی بنانا آپ کی ذمہ داری ہے کہ آپ کی منتقلی بروقت مکمل ہو جائے۔ اگر آپ کو اپنے ٹرانزیکشن کی تصدیق کرنے میں دشواری ہو رہی ہے، تو براہ کرم [اس فارم](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) اور کور ڈویلپرز کا استعمال کرتے ہوئے رابطہ کریں۔ وہاں آپ کی مدد ہو گی. 
+This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### میں نے اپنا ڈیلیگیشن/سٹیک/کیوریشن کی منتقلی شروع کی ہے اور مجھے یقین نہیں ہے کہ آیا یہ L2 تک پہنچا ہے، میں کیسے تصدیق کر سکتا ہوں کہ اسے صحیح طریقے سے منتقل کیا گیا تھا؟ @@ -37,43 +37,43 @@ L2 ٹرانسفر ٹول L1 کو پیغامات بھیجنے کے لیے Arbitru ## سب گراف منتقلی -### میں اپنا سب گراف کیسے منتقل کروں؟ +### How do I transfer my Subgraph? -اپنے سب گراف کو منتقل کرنے کے لیے، آپ کو درج ذیل مراحل کو مکمل کرنے کی ضرورت ہو گی: +To transfer your Subgraph, you will need to complete the following steps: 1. ایتھیریم مین نیٹ پر منتقلی شروع کریں 2. تصدیق کے لیے 20 منٹ انتظار کریں -3. Arbitrum پر سب گراف منتقلی کی تصدیق کریں\* +3. Confirm Subgraph transfer on Arbitrum\* -4. Arbitrum پر سب گراف کی اشاعت مکمل کریں +4. Finish publishing Subgraph on Arbitrum 5. کیوری لنک اپ ڈیٹ کریں (تجویز کردہ) -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### مجھے اپنی منتقلی کہاں سے شروع کرنی چاہیے؟ -آپ اپنی منتقلی کو [سب گراف سٹوڈیو](https://thegraph.com/studio/)، [ایکسپلورر](https://thegraph.com/explorer) یا کسی بھی سب گراف کی تفصیلات کے پیج سے شروع کر سکتے ہیں۔ منتقلی شروع کرنے کے لیے سب گراف کی تفصیلات کے پیج میں "سب گراف منتقل کریں" بٹن کلک کریں۔ +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### میرا سب گراف منتقل ہونے تک مجھے کتنا انتظار کرنا پڑے گا +### How long do I need to wait until my Subgraph is transferred منتقلی کا وقت تقریباً 20 منٹ لگتا ہے۔ Arbitrum بریج کی منتقلی کو خود بخود مکمل کرنے کے لیے پس منظر میں کام کر رہا ہے۔ کچھ معاملات میں، گیس کی قیمتیں بڑھ سکتی ہیں اور آپ کو دوبارہ ٹرانزیکشن کی تصدیق کرنی ہوگی. -### کیا میرا سب گراف L2 میں منتقل کرنے کے بعد بھی قابل دریافت ہو گا؟ +### Will my Subgraph still be discoverable after I transfer it to L2? 
-آپ کا سب گراف صرف اس نیٹ ورک پر قابل دریافت ہوگا جس پر اسے شائع کیا گیا ہے۔ مثال کے طور پر، اگر آپ کا سب گراف Arbitrum One پر ہے، تو آپ اسے صرف Arbitrum One پر ایکسپلورر میں تلاش کر سکتے ہیں اور اسے ایتھیریم پر تلاش نہیں کر پائیں گے۔ براہ کرم یقینی بنائیں کہ آپ نے نیٹ ورک سوئچر میں پیج کے اوپری حصے میں Arbitrum One کا انتخاب کیا ہے تاکہ یہ یقینی بنایا جا سکے کہ آپ درست نیٹ ورک پر ہیں۔ منتقلی کے بعد، L1 سب گراف فرسودہ کے طور پر ظاہر ہوگا. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### کیا میرے سب گراف کو منتقل کرنے کے لیے اسے شائع کرنے کی ضرورت ہے؟ +### Does my Subgraph need to be published to transfer it? -سب گراف ٹرانسفر ٹول سے فائدہ اٹھانے کے لیے، آپ کا سب گراف پہلے سے ہی ایتھیریم مین نیٹ پر شائع ہونا چاہیے اور اس میں کچھ کیوریشن سگنل ہونا چاہیے جو والیٹ کی ملکیت ہے جو سب گراف کا مالک ہے۔ اگر آپ کا سب گراف شائع نہیں ہوا ہے، تو یہ تجویز کیا جاتا ہے کہ آپ براہ راست Arbitrum One پر شائع کریں - متعلقہ گیس کی فیسیں کافی کم ہوں گی۔ اگر آپ شائع شدہ سب گراف کو منتقل کرنا چاہتے ہیں لیکن مالک کے اکاؤنٹ نے اس پر کوئی سگنل کیوریٹ نہیں کیا ہے، تو آپ اس اکاؤنٹ سے ایک چھوٹی رقم (جیسے 1 GRT) کا اشارہ دے سکتے ہیں۔ یقینی بنائیں کہ "خودکار منتقلی" سگنل کا انتخاب کریں. +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### میرے سب گراف کے ایتھیریم مین نیٹ ورزن کا کیا ہوتا ہے جب میں Arbitrum میں منتقل ہو جاتا ہوں؟ +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -آپ کے سب گراف کو Arbitrum میں منتقل کرنے کے بعد، ایتھیریم مین نیٹ ورزن فرسودہ ہو جائے گا۔ ہمارا مشورہ ہے کہ آپ 48 گھنٹوں کے اندر اپنے کیوری کے لنک کو اپ ڈیٹ کریں۔ تاہم، ایک رعایتی مدت موجود ہے جو آپ کے مین نیٹ لنک کو کام میں لاتی رہتی ہے تاکہ کسی بھی فریق ثالث ڈیپ سپورٹ کو اپ ڈیٹ کیا جا سکے۔ +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### میری منتقلی کے بعد، کیا مجھے بھی Arbitrum پر دوبارہ شائع کرنے کی ضرورت ہے؟ @@ -81,21 +81,21 @@ L2 ٹرانسفر ٹول L1 کو پیغامات بھیجنے کے لیے Arbitru ### کیا دوبارہ شائع کرنے کے دوران میرا اینڈ پوائنٹ ڈاؤن ٹائم کا تجربہ کرے گا؟ -اس بات کا امکان نہیں ہے، لیکن اس بات پر منحصر ہے کہ انڈیکسرز L1 پر سب گراف کو سپورٹ کر رہے ہیں اور کیا وہ اس کو انڈیکس کرتے رہیں گے جب تک کہ L2 پر سب گراف مکمل طور پر سپورٹ نہ ہو جائے، مختصر وقت کا تجربہ کرنا ممکن ہے۔ +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. 
### کیا L2 پر اشاعت اور ورزن ایتھیریم مین نیٹ کی طرح ہے؟ -جی ہاں. سب گراف سٹوڈیو میں شائع کرتے وقت اپنے شائع شدہ نیٹ ورک کے طور پر Arbitrum One کو منتخب کریں۔ سٹوڈیو میں، تازہ ترین اختتامی نقطہ دستیاب ہوگا جو سب گراف کے تازہ ترین اپ ڈیٹ شدہ ورژن کی طرف اشارہ کرتا ہے۔ +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### کیا میرے سب گراف کا کیوریشن میرے سب گراف کے ساتھ منتقل ہو جائے گا؟ +### Will my Subgraph's curation move with my Subgraph? -اگر آپ نے خودکار منتقلی کے سگنل کا انتخاب کیا ہے، تو آپ کی اپنی کیوریشن کا 100% حصہ آپ کے سب گراف کے ساتھ Arbitrum One میں منتقل ہو جائے گا۔ منتقلی کے وقت سب گراف کے تمام کیوریشن سگنل کو GRT میں تبدیل کر دیا جائے گا، اور آپ کے کیوریشن سگنل کے مطابق GRT L2 سب گراف پر سگنل منٹ کے لیے استعمال کیا جائے گا. +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -دوسرے کیوریٹرز یہ انتخاب کر سکتے ہیں کہ آیا GRT کا اپنا حصہ واپس لینا ہے، یا اسی سب گراف پر اسے L2 پر منٹ سگنل پر منتقل کرنا ہے۔ +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### کیا میں منتقلی کے بعد اپنے سب گراف کو واپس ایتھیریم مین نیٹ پر منتقل کر سکتا ہوں؟ +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -ایک بار منتقل ہونے کے بعد، اس سب گراف کا آپ کا ایتھیریم مین نیٹ ورزن فرسودہ ہو جائے گا۔ اگر آپ مین نیٹ پر واپس جانا چاہتے ہیں، تو آپ کو مین نیٹ پر دوبارہ تعینات اور شائع کرنے کی ضرورت ہوگی۔ تاہم، ایتھیریم مین نیٹ پر واپس منتقلی کی سختی سے حوصلہ شکنی کی جاتی ہے کیونکہ انڈیکسنگ انعامات بالآخر Arbitrum One پر مکمل طور پر تقسیم کیے جائیں گے. +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### مجھے اپنی منتقلی مکمل کرنے کے لیے پریجڈ ایتھیریم کی ضرورت کیوں ہے؟ @@ -207,19 +207,19 @@ If you'd like to release GRT from the vesting contract, you can transfer them ba \*اگر ضروری ہو تو - یعنی آپ کنٹریکٹ ایڈریس استعمال کر رہے ہیں. -### مجھے کیسے پتہ چلے گا کہ میں نے جو سب گراف تیار کیا ہے وہ L2 میں چلا گیا ہے؟ +### How will I know if the Subgraph I curated has moved to L2? -سب گراف کی تفصیلات کا پیج دیکھتے وقت، ایک بینر آپ کو مطلع کرے گا کہ اس سب گراف کو منتقل کر دیا گیا ہے۔ آپ اپنے کیوریشن کو منتقل کرنے کے لیے پرامپٹ پر عمل کر سکتے ہیں۔ آپ یہ معلومات کسی بھی سب گراف کے سب گراف کی تفصیلات کے پیج پر بھی حاصل کر سکتے ہیں جو منتقل ہوا ہے۔ +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### اگر میں اپنے کیوریشن کو L2 میں منتقل نہیں کرنا چاہتا تو کیا ہو گا؟ -جب سب گراف فرسودہ ہو جاتا ہے تو آپ کے پاس اپنا سگنل واپس لینے کا اختیار ہوتا ہے۔ اسی طرح، اگر کوئی سب گراف L2 میں منتقل ہو گیا ہے، تو آپ ایتھیریم مین نیٹ میں اپنے سگنل کو واپس لینے یا L2 کو سگنل بھیجنے کا انتخاب کر سکتے ہیں. 
+When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### میں کیسے جان سکتا ہوں کہ میری کیوریشن کامیابی سے منتقل ہو گئی ہے؟ L2 ٹرانسفر ٹول شروع ہونے کے تقریباً 20 منٹ بعد سگنل کی تفصیلات ایکسپلورر کے ذریعے قابل رسائی ہوں گی. -### کیا میں ایک وقت میں ایک سے زیادہ سب گراف پر اپنی کیوریشن منتقل کر سکتا ہوں؟ +### Can I transfer my curation on more than one Subgraph at a time? اس وقت بلک ٹرانسفر کا کوئی آپشن نہیں ہے. @@ -267,7 +267,7 @@ L2 ٹرانسفر ٹول کو آپ کے سٹیک کی منتقلی مکمل ک ### کیا مجھے اپنا سٹیک منتقل کرنے سے پہلے Arbitrum پر انڈیکس کرنا ہو گا؟ -آپ انڈیکسنگ کو ترتیب دینے سے پہلے مؤثر طریقے سے اپنا سٹیک منتقل کر سکتے ہیں، لیکن آپ L2 پر کسی بھی انعام کا دعویٰ نہیں کر سکیں گے جب تک کہ آپ L2 پر سب گرافس کے لیے مختص نہیں کر دیتے، ان کو انڈیکس نہیں کرتے اور POIs پیش نہیں کرتے. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### کیا ڈیلیگیٹرز اپنے ڈیلیگیشن کو منتقل کر سکتے ہیں اس سے پہلے کہ میں اپنے انڈیکسنگ کا سٹیک منتقل کروں؟ From 32d91940d357c8590a313541992183eccf1663a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:37 -0500 Subject: [PATCH 0312/1789] New translations l2-transfer-tools-faq.mdx (Vietnamese) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/vi/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/vi/archived/arbitrum/l2-transfer-tools-faq.mdx index cbc7b6346f33..2fec5116ce58 100644 --- a/website/src/pages/vi/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/vi/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ The exception is with smart contract wallets like multisigs: these are smart con The L2 Transfer Tools use Arbitrum’s native mechanism to send messages from L1 to L2. This mechanism is called a “retryable ticket” and is used by all native token bridges, including the Arbitrum GRT bridge. You can read more about retryable tickets in the [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -When you transfer your assets (subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. 
When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? @@ -36,43 +36,43 @@ If you have the L1 transaction hash (which you can find by looking at the recent ## Subgraph Transfer -### How do I transfer my subgraph? +### How do I transfer my Subgraph? -To transfer your subgraph, you will need to complete the following steps: +To transfer your Subgraph, you will need to complete the following steps: 1. Initiate the transfer on Ethereum mainnet 2. Wait 20 minutes for confirmation -3. Confirm subgraph transfer on Arbitrum\* +3. Confirm Subgraph transfer on Arbitrum\* -4. Finish publishing subgraph on Arbitrum +4. Finish publishing Subgraph on Arbitrum 5. Update Query URL (recommended) -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. 
If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### Where should I initiate my transfer from? -You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### How long do I need to wait until my subgraph is transferred +### How long do I need to wait until my Subgraph is transferred The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. -### Will my subgraph still be discoverable after I transfer it to L2? +### Will my Subgraph still be discoverable after I transfer it to L2? -Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### Does my subgraph need to be published to transfer it? +### Does my Subgraph need to be published to transfer it? -To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? 
-After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### After I transfer, do I also need to re-publish on Arbitrum? @@ -80,21 +80,21 @@ After the 20 minute transfer window, you will need to confirm the transfer with ### Will my endpoint experience downtime while re-publishing? -It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? -Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### Will my subgraph's curation move with my subgraph? +### Will my Subgraph's curation move with my Subgraph? -If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### Can I move my subgraph back to Ethereum mainnet after I transfer? +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### Why do I need bridged ETH to complete my transfer? 
@@ -206,19 +206,19 @@ To transfer your curation, you will need to complete the following steps: \*If necessary - i.e. you are using a contract address. -### How will I know if the subgraph I curated has moved to L2? +### How will I know if the Subgraph I curated has moved to L2? -When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### What if I do not wish to move my curation to L2? -When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### How do I know my curation successfully transferred? Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. -### Can I transfer my curation on more than one subgraph at a time? +### Can I transfer my curation on more than one Subgraph at a time? There is no bulk transfer option at this time. @@ -266,7 +266,7 @@ It will take approximately 20 minutes for the L2 transfer tool to complete trans ### Do I have to index on Arbitrum before I transfer my stake? -You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### Can Delegators move their delegation before I move my indexing stake? From 715672b24085bd8607b51a60a84bcd3e0c7b01e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:38 -0500 Subject: [PATCH 0313/1789] New translations l2-transfer-tools-faq.mdx (Marathi) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/mr/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/mr/archived/arbitrum/l2-transfer-tools-faq.mdx index b6ee08a5bbed..696f3c69a4fc 100644 --- a/website/src/pages/mr/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/mr/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ The exception is with smart contract wallets like multisigs: these are smart con L2 ट्रांस्फर टूल्स आपल्याला L1 वरून L2ला संदेश पाठविण्याच्या अर्बिट्रमच्या स्वभाविक विधानाचा वापर करतात. हा विधान "पुनः प्रयासयोग्य पर्याय" म्हणून ओळखला जातो आणि हा सर्व स्थानिक टोकन ब्रिजेस, अर्बिट्रम GRT ब्रिज यासह सहाय्यक आहे. आपण पुनः प्रयासयोग्य पर्यायांबद्दल अधिक माहिती [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging) वाचू शकता. 
-आपल्याला आपल्या संपत्तींच्या (सबग्राफ, स्टेक, प्रतिनिधित्व किंवा पुरवणी) L2ला स्थानांतरित केल्यास, एक संदेश अर्बिट्रम GRT ब्रिजमध्ये पाठविला जातो ज्याने L2वर पुनः प्रयासयोग्य पर्याय तयार करतो. स्थानांतरण उपकरणात्रूटील वैल्यूत्या किंवा संचलनसाठी काही ईटीएच वॅल्यू आहे, ज्यामुळे 1) पर्याय तयार करण्यासाठी पैसे देणे आणि 2) L2मध्ये पर्याय संचालित करण्यासाठी गॅस देणे ह्याचा वापर केला जातो. परंतु, पर्याय संचालनाच्या काळात गॅसची किंमते वेळेत बदलू शकतात, ज्यामुळे ही स्वयंप्रयत्न किंवा संचालन प्रयत्न अपयशी होऊ शकतात. जेव्हा ती प्रक्रिया अपयशी होते, तेव्हा अर्बिट्रम ब्रिज किंवा 7 दिवसापर्यंत पुन्हा प्रयत्न करण्याची क्षमता आहे, आणि कोणत्याही व्यक्ती त्या "पुनर्मिलन" पर्यायाचा प्रयत्न करू शकतो (त्यासाठी अर्बिट्रमवर काही ईटीएच स्थानांतरित केलेले असणे आवश्यक आहे). +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). -ही आपल्याला सगळ्या स्थानांतरण उपकरणांमध्ये "पुष्टीकरण" चरण म्हणून ओळखता - आपल्याला अधिकांशपेक्षा अधिक आपल्याला स्वयंप्रयत्न सध्याच्या वेळेत स्वयंप्रयत्न सध्याच्या वेळेत स्वतः संचालित होईल, परंतु आपल्याला येते कि ते दिले आहे ह्याची तपासणी करणे महत्वपूर्ण आहे. आपल्याला किंवा 7 दिवसात कोणत्याही सफल पुनर्मिलनाचे प्रयत्न केले त्यामुळे प्रयत्नशील नसत्या आणि त्या 7 दिवसांत कोणताही प्रयत्न नसत्याने, अर्बिट्रम ब्रिजने पुनर्मिलन पर्यायाचा त्याग केला आहे, आणि आपली संपत्ती (सबग्राफ, स्टेक, प्रतिनिधित्व किंवा पुरवणी) वेळेत विचली जाईल आणि पुनर्प्राप्त केली जाऊ शकणार नाही. ग्राफचे मुख्य डेव्हलपर्सन्सने या परिस्थितियांच्या जाणीवपणे प्राणीसमूह ठरविले आहे आणि त्याच्या अगोदर पुनर्मिलन केले जाईल, परंतु याच्यातून, आपल्याला आपल्या स्थानांतरणाची पूर्ण करण्याची जबाबदारी आहे. आपल्याला आपल्या व्यवहाराची पुष्टी करण्यात किंवा संचालनाची समस्या आहे का, कृपया [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) वापरून संपूर्ण डेव्हलपर्सन्सची मदत करण्याची क्षमता आहे. +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? 
@@ -36,43 +36,43 @@ If you have the L1 transaction hash (which you can find by looking at the recent ## सबग्राफ हस्तांतरण -### मी माझा सबग्राफ कसा हस्तांतरित करू? +### How do I transfer my Subgraph? -तुमचा सबग्राफ हस्तांतरित करण्यासाठी, तुम्हाला खालील चरण पूर्ण करावे लागतील: +To transfer your Subgraph, you will need to complete the following steps: 1. Ethereum mainnet वर हस्तांतरण सुरू करा 2. पुष्टीकरणासाठी 20 मिनिटे प्रतीक्षा करा -3. आर्बिट्रमवर सबग्राफ हस्तांतरणाची पुष्टी करा\* +3. Confirm Subgraph transfer on Arbitrum\* -4. आर्बिट्रम वर सबग्राफ प्रकाशित करणे समाप्त करा +4. Finish publishing Subgraph on Arbitrum 5. क्वेरी URL अपडेट करा (शिफारस केलेले) -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### मी माझे हस्तांतरण कोठून सुरू करावे? -आपल्याला स्थानांतरण सुरू करण्याची क्षमता आहे Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) किंवा कोणत्याही सबग्राफ तपशील पृष्ठापासून सुरू करू शकता. सबग्राफ तपशील पृष्ठावर "सबग्राफ स्थानांतरित करा" बटणवर क्लिक करा आणि स्थानांतरण सुरू करा. +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### माझा सबग्राफ हस्तांतरित होईपर्यंत मला किती वेळ प्रतीक्षा करावी लागेल +### How long do I need to wait until my Subgraph is transferred स्थानांतरणासाठी किंमतीतून प्रायः 20 मिनिटे लागतात. आर्बिट्रम ब्रिज आपल्याला स्वत: स्थानांतरण स्वयंप्रयत्नातून पूर्ण करण्यासाठी पारंपारिकपणे काम करत आहे. कितीतरी प्रकारांत स्थानांतरण केल्यास, गॅस किंमती वाढू शकतात आणि आपल्याला परिपुष्टीकरण पुन्हा करण्याची आवश्यकता लागू शकते. -### मी L2 मध्ये हस्तांतरित केल्यानंतर माझा सबग्राफ अजूनही शोधण्यायोग्य असेल का? +### Will my Subgraph still be discoverable after I transfer it to L2? -आपला सबग्राफ केवळ त्या नेटवर्कवर शोधन्यायला येतो, ज्यावर तो प्रकाशित केला जातो. उदाहरणार्थ, आपला सबग्राफ आर्बिट्रम वनवर आहे तर आपल्याला तो केवळ आर्बिट्रम वनवरच्या एक्सप्लोररमध्ये शोधू शकता आणि आपल्याला इथे एथेरियमवर शोधायला सक्षम नसेल. कृपया पृष्ठाच्या वरील नेटवर्क स्विचरमध्ये आर्बिट्रम वन निवडल्याची आपल्याला कसे सुनिश्चित करण्याची आवश्यकता आहे. स्थानांतरणानंतर, L1 सबग्राफ विकलप म्हणून दिसणारा. +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### माझा सबग्राफ हस्तांतरित करण्यासाठी प्रकाशित करणे आवश्यक आहे का? +### Does my Subgraph need to be published to transfer it? 
-सबग्राफ स्थानांतरण उपकरणाचा लाभ घेण्यासाठी, आपल्याला आपल्या सबग्राफला आधीच प्रकाशित केलेला पाहिजे आणि त्याच्या सबग्राफच्या मालक वॉलेटमध्ये काही परिपुष्टी संकेत असणे आवश्यक आहे. आपला सबग्राफ प्रकाशित नसल्यास, आपल्याला साधारणपणे आर्बिट्रम वनवर सीधे प्रकाशित करण्यात योग्य आहे - संबंधित गॅस फीस खूपच किमान असतील. आपल्याला प्रकाशित सबग्राफ स्थानांतरित करू इच्छित असल्यास, परंतु मालक खाते त्यावर कोणतीही प्रतिसाद संकेत दिली नाही, तर आपण त्या खाते पासून थोडीसी परिपुष्टी (उदा. 1 GRT) संकेतिक करू शकता; कृपया "स्वत: स्थानांतरित होणारी" संकेत निवडायला नक्की करा. +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### माझ्या सबग्राफच्या इथेरियम मुख्य नेटवर्कचा संस्करण हस्तांतरित करताना Arbitrum वर काय होतं? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? -आर्बिट्रमकडे आपल्या सबग्राफ स्थानांतरित करण्यानंतर, एथेरियम मुख्यनेट आवृत्ती विकलप म्हणून दिली जाईल. आपल्याला आपल्या क्वेरी URL वरील बदल करण्याची सल्ला आहे की त्याच्या 48 तासांत दिला जाईल. हेरंब विलंबप्रदान केलेले आहे ज्यामुळे आपली मुख्यनेट URL सक्रिय ठेवली जाईल आणि कोणत्याही तृतीय पक्षाच्या dapp समर्थनाच्या आधी अद्यतनित केल्या जाऊ शकतात. +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### मी हस्तांतरित केल्यानंतर, मला आर्बिट्रमवर पुन्हा प्रकाशित करण्याची देखील आवश्यकता आहे का? @@ -80,21 +80,21 @@ If you have the L1 transaction hash (which you can find by looking at the recent ### Will my endpoint experience downtime while re-publishing? -It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### एल2 व Ethereum मुख्य नेटवर्कवर प्रकाशन आणि संस्करणदेखील सारखं आहे का? -Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### पुन्हा प्रकाशित करताना माझ्या एंडपॉईंटला डाउन-टाइम असेल का? +### Will my Subgraph's curation move with my Subgraph? -आपण "स्वत: स्थानांतरित होणारी" संकेत निवडल्यास, आपल्या आपल्या स्वत: स्थानांतरित करणार्या सबग्राफसह 100% आपल्या पुरवणीने निवडलेल्या स्थानांतरण होईल. सबग्राफच्या सर्व स्थानांतरण संकेताच्या स्थानांतरणाच्या क्षणी जीआरटीत रूपांतरित केली जाईल, आणि आपल्या पुरवणीसंकेताशी संबंधित जीआरटी आपल्याला L2 सबग्राफवर संकेत वितरित करण्यासाठी वापरली जाईल. 
+If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -इतर क्युरेटर्सनी त्याच्या भागाची GRT वापरून घेण्याची किंवा त्याच्या सबग्राफवर सिग्नल मिंट करण्यासाठी त्याची GRT L2वर हस्तांतरित करण्याची परवानगी आहे. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### तुम्ही आपले सबग्राफ L2 वर हस्तांतरित केल्यानंतर पुन्हा Ethereum मुख्य नेटवर्कवर परत करू शकता का? +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -स्थानांतरित केल्यानंतर, आपल्या आर्बिट्रम वनवरच्या सबग्राफची एथेरियम मुख्यनेट आवृत्ती विकलप म्हणून दिली जाईल. आपल्याला मुख्यनेटवर परत जाण्याची इच्छा आहे किंवा, आपल्याला मुख्यनेटवर परत जाण्याची इच्छा आहे तर आपल्याला पुन्हा डिप्लॉय आणि प्रकाशित करण्याची आवश्यकता आहे. परंतु आर्बिट्रम वनवर परत गेल्याच्या बदलाच्या दिल्लाला मुख्यनेटवरील सूचना पूर्णपणे त्यात दिलेली आहे. +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### माझे हस्तांतरण पूर्ण करण्यासाठी मला ब्रिज्ड ETH का आवश्यक आहे? @@ -206,19 +206,19 @@ The tokens that are being undelegated are "locked" and therefore cannot be trans \*आवश्यक असल्यास - उदा. तुम्ही एक कॉन्ट्रॅक्ट पत्ता वापरत आहात. -### मी क्युरेट केलेला सबग्राफ L2 वर गेला असल्यास मला कसे कळेल? +### How will I know if the Subgraph I curated has moved to L2? -सबग्राफ तपशील पृष्ठाची पाहणी केल्यास, एक बॅनर आपल्याला सूचित करेल की हा सबग्राफ स्थानांतरित केलेला आहे. आपल्याला सुचवल्यास, आपल्या पुरवणीचे स्थानांतरण करण्यासाठी प्रॉम्प्ट अनुसरण करू शकता. आपल्याला ह्या माहितीला सापडण्याची किंवा स्थानांतरित केलेल्या कोणत्याही सबग्राफच्या तपशील पृष्ठावर मिळवू शकता. +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### मी माझे क्युरेशन L2 वर हलवू इच्छित नसल्यास काय करावे? -कोणत्याही सबग्राफला प्राकृतिक रितीने प्रतिसादित केल्यानंतर, आपल्याला आपल्या सिग्नलला वापरून घेण्याची पर्वाह आहे. तसेच, आपल्याला जर सबग्राफ L2 वर हस्तांतरित केलेला असेल तर, आपल्याला आपल्या सिग्नलला ईथेरियम मेननेटवरून वापरून घेण्याची किंवा L2 वर सिग्नल पाठवण्याची पर्वाह आहे. +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### माझे क्युरेशन यशस्वीरित्या हस्तांतरित झाले हे मला कसे कळेल? L2 हस्तांतरण साधन सुरू केल्यानंतर, संकेत तपशील २० मिनिटांनंतर Explorer मध्ये पहिल्या दिशेने प्रवेशक्षम होईल. -### किंवा तुम्ही एकापेक्षा अधिक सबग्राफवर एकावेळी आपल्या कुरेशनची हस्तांतरण करू शकता का? +### Can I transfer my curation on more than one Subgraph at a time? यावेळी मोठ्या प्रमाणात हस्तांतरण पर्याय नाही. @@ -266,7 +266,7 @@ L2 स्थानांतरण उपकरणाने आपल्याच ### माझ्या शेअर्स हस्तांतरित करण्यापूर्वी मला Arbitrum वर सूचीबद्ध करण्याची आवश्यकता आहे का? 
-आपल्याला स्वारूपण ठरविण्यापूर्वीच आपले स्टेक प्रभावीपणे स्थानांतरित करू शकता, परंतु L2 वर कोणत्या उत्पादनाची मागणी करण्याची अनुमती नसेल तोंद, ते लागू करण्यास आपल्याला L2 वरील सबग्राफ्सला आवंटन देण्याची, त्यांची सूचीबद्धीकरण करण्याची आणि POIs प्रस्तुत करण्याची आवश्यकता आहे, ते तुम्ही L2 वर कोणत्याही प्रामोड पावण्याच्या पर्यायी नसेल. +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### मी माझा इंडेक्सिंग स्टेक हलवण्यापूर्वी प्रतिनिधी त्यांचे प्रतिनिधी हलवू शकतात का? From d5988a271589e49b9c87302be0d8163bf0670bae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:40 -0500 Subject: [PATCH 0314/1789] New translations l2-transfer-tools-faq.mdx (Hindi) --- .../arbitrum/l2-transfer-tools-faq.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/hi/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/hi/archived/arbitrum/l2-transfer-tools-faq.mdx index 66574cb53dd4..29dad7f06897 100644 --- a/website/src/pages/hi/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/hi/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -24,9 +24,9 @@ The exception is with smart contract wallets like multisigs: these are smart con L2 ट्रांसफर टूल L1 से L2 तक संदेश भेजने के लिए आर्बिट्रम के मूल तंत्र का उपयोग करते हैं। इस तंत्र को "पुनर्प्रयास योग्य टिकट" कहा जाता है और इसका उपयोग आर्बिट्रम जीआरटी ब्रिज सहित सभी देशी टोकन ब्रिजों द्वारा किया जाता है। आप पुनः प्रयास योग्य टिकटों के बारे में अधिक जानकारी [आर्बिट्रम डॉक्स](https://docs.arbitrum.io/arbos/l1-to-l2-messageing) में पढ़ सकते हैं। -जब आप अपनी संपत्ति (सबग्राफ, हिस्सेदारी, प्रतिनिधिमंडल या क्यूरेशन) को एल2 में स्थानांतरित करते हैं, तो आर्बिट्रम जीआरटी ब्रिज के माध्यम से एक संदेश भेजा जाता है जो एल2 में एक पुनः प्रयास योग्य टिकट बनाता है। ट्रांसफ़र टूल में लेन-देन में कुछ ETH मान शामिल होते हैं, जिनका उपयोग 1) टिकट बनाने के लिए भुगतान करने और 2) L2 में टिकट निष्पादित करने के लिए गैस का भुगतान करने के लिए किया जाता है। हालाँकि, क्योंकि गैस की कीमतें L2 में निष्पादित होने के लिए टिकट तैयार होने तक के समय में भिन्न हो सकती हैं, यह संभव है कि यह ऑटो-निष्पादन प्रयास विफल हो जाए। जब ऐसा होता है, तो आर्बिट्रम ब्रिज पुनः प्रयास योग्य टिकट को 7 दिनों तक जीवित रखेगा, और कोई भी टिकट को "रिडीम" करने का पुनः प्रयास कर सकता है (जिसके लिए आर्बिट्रम में ब्रिज किए गए कुछ ईटीएच के साथ वॉलेट की आवश्यकता होती है)। +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). 
-इसे हम सभी स्थानांतरण टूल में "पुष्टि करें" चरण कहते हैं - यह ज्यादातर मामलों में स्वचालित रूप से चलेगा, क्योंकि ऑटो-निष्पादन अक्सर सफल होता है, लेकिन यह महत्वपूर्ण है कि आप यह सुनिश्चित करने के लिए वापस जांचें कि यह पूरा हो गया है। यदि यह सफल नहीं होता है और 7 दिनों में कोई सफल पुनर्प्रयास नहीं होता है, तो आर्बिट्रम ब्रिज टिकट को खारिज कर देगा, और आपकी संपत्ति (सबग्राफ, हिस्सेदारी, प्रतिनिधिमंडल या क्यूरेशन) खो जाएगी और पुनर्प्राप्त नहीं की जा सकेगी। ग्राफ़ कोर डेवलपर्स के पास इन स्थितियों का पता लगाने और बहुत देर होने से पहले टिकटों को भुनाने की कोशिश करने के लिए एक निगरानी प्रणाली है, लेकिन यह सुनिश्चित करना अंततः आपकी ज़िम्मेदारी है कि आपका स्थानांतरण समय पर पूरा हो जाए। यदि आपको अपने लेनदेन की पुष्टि करने में परेशानी हो रही है, तो कृपया [इस फॉर्म](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) और कोर डेव का उपयोग करके संपर्क करें आपकी मदद के लिए वहाँ मौजूद रहूँगा. +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. ### मैंने अपना डेलिगेशन/स्टेक/क्यूरेशन ट्रांसफर शुरू कर दिया है और मुझे यकीन नहीं है कि यह एल2 तक पहुंच गया है या नहीं, मैं कैसे पुष्टि कर सकता हूं कि इसे सही तरीके से ट्रांसफर किया गया था? @@ -36,43 +36,43 @@ If you have the L1 transaction hash (which you can find by looking at the recent ## सबग्राफ स्थानांतरण -### मैं अपना सबग्राफ कैसे स्थानांतरित करूं? +### How do I transfer my Subgraph? -अपने सबग्राफ को स्थानांतरित करने के लिए, आपको निम्नलिखित चरणों को पूरा करने होंगे: +To transfer your Subgraph, you will need to complete the following steps: 1. Ethereum mainnet वर हस्तांतरण सुरू करा 2. पुष्टि के लिए 20 मिनट का इंतजार करें: -3. आर्बिट्रमवर सबग्राफ हस्तांतरणाची पुष्टी करा\* +3. Confirm Subgraph transfer on Arbitrum\* -4. आर्बिट्रम पर सबग्राफ का प्रकाशन समाप्त करें +4. Finish publishing Subgraph on Arbitrum 5. क्वेरी यूआरएल अपडेट करें (अनुशंसित) -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). ### मुझे अपना स्थानांतरण कहाँ से आरंभ करना चाहिए? 
-आप[Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) या किसी भी Subgraph विवरण पृष्ठ से अपने transfer को प्रारंभ कर सकते हैं। Subgraph विवरण पृष्ठ में "Transfer " button पर click करके transfer आरंभ करें। +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. -### मेरा सबग्राफ़ स्थानांतरित होने तक मुझे कितने समय तक प्रतीक्षा करनी होगी? +### How long do I need to wait until my Subgraph is transferred अंतरण करने में लगभग 20 मिनट का समय लगता है। Arbitrum bridge स्वचालित रूप से bridge अंतरण पूरा करने के लिए पृष्ठभूमि में काम कर रहा है। कुछ मामलों में, गैस लागत में spike हो सकती है और आपको transaction की पुष्टि फिर से करनी होगी। -### क्या मेरा सबग्राफ L2 में स्थानांतरित करने के बाद भी खोजा जा सकेगा? +### Will my Subgraph still be discoverable after I transfer it to L2? -आपका सबग्राफ केवल उस नेटवर्क पर खोजने योग्य होगा जिस पर यह प्रकाशित किया गया है। उदाहरण स्वरूप, यदि आपका सबग्राफ आर्बिट्रम वन पर है, तो आपकेंद्रीय तंत्र पर केवल आर्बिट्रम वन के खोजक में ही ढूंढा जा सकता है और आप इथेरियम पर इसे नहीं खोज पाएंगे। कृपया सुनिश्चित करें कि आपने पृष्ठ के शीर्ष में नेटवर्क स्विचर में आर्बिट्रम वन को चुना है ताकि आप सही नेटवर्क पर हों। अंतरण के बाद, L1 सबग्राफ को पुराना किया गया माना जाएगा। +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. -### क्या मेरे सबग्राफ को स्थानांतरित करने के लिए इसे प्रकाशित किया जाना आवश्यक है? +### Does my Subgraph need to be published to transfer it? -सबग्राफ अंतरण उपकरण का लाभ उठाने के लिए, आपके सबग्राफ को पहले ही ईथेरियम मेननेट पर प्रकाशित किया जाना चाहिए और सबग्राफ के मालिक wallet द्वारा स्वामित्व signal subgraph का कुछ होना चाहिए। यदि आपका subgraph प्रकाशित नहीं है, तो सिफ़ारिश की जाती है कि आप सीधे Arbitrum One पर प्रकाशित करें - जुड़े गए gas fees काफी कम होंगे। यदि आप किसी प्रकाशित subgraph को अंतरण करना चाहते हैं लेकिन owner account ने उस पर कोई signal curate नहीं किया है, तो आप उस account से थोड़ी सी राशि (जैसे 1 GRT) के signal कर सकते हैं; सुनिश्चित करें कि आपने "auto-migrating" signal को चुना है। +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -### मी आर्बिट्रममध्ये हस्तांतरित केल्यानंतर माझ्या सबग्राफच्या इथरियम मेननेट आवृत्तीचे काय होते? +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? 
-अपने सबग्राफ को आर्बिट्रम पर अंतरण करने के बाद, ईथेरियम मेननेट संस्करण को पुराना किया जाएगा। हम आपको 48 घंटों के भीतर अपनी क्वेरी URL को अद्यतन करने की सिफारिश करते हैं। हालांकि, एक ग्रेस पीरियड लागू होता है जिसके तहत आपकी मुख्यनेट URL को कार्यरत रखा जाता है ताकि किसी तिसरी पक्ष डैप समर्थन को अपडेट किया जा सके। +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. ### स्थानांतरण करने के बाद, क्या मुझे आर्बिट्रम पर पुनः प्रकाशन की आवश्यकता होती है? @@ -80,21 +80,21 @@ If you have the L1 transaction hash (which you can find by looking at the recent ### Will my endpoint experience downtime while re-publishing? -It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. ### क्या L2 पर प्रकाशन और संस्करणीकरण Ethereum मेननेट के समान होते हैं? -Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. -### क्या मेरे subgraph की curation उसके साथ चलेगी जब मैंsubgraph को स्थानांतरित करूँगा? +### Will my Subgraph's curation move with my Subgraph? -यदि आपने " auto-migrating" signal का चयन किया है, तो आपके खुद के curation का 100% आपकेsubgraph के साथ Arbitrum One पर जाएगा। subgraph के सभी curation signalको अंतरण के समय GRT में परिवर्तित किया जाएगा, और आपके curation signal के समर्थन में उत्पन्न होने वाले GRT का उपयोग L2 subgraph पर signal mint करने के लिए किया जाएगा। +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. -अन्य क्यूरेटर यह चुन सकते हैं कि जीआरटी का अपना अंश वापस लेना है या नहीं, या इसे उसी सबग्राफ पर मिंट सिग्नल के लिए एल2 में स्थानांतरित करना है या नहीं। +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. -### क्या मैं स्थानांतरण के बाद अपने सबग्राफ को एथेरियम मेननेट पर वापस ले जा सकता हूं? +### Can I move my Subgraph back to Ethereum mainnet after I transfer? -एक बार अंतरित होने के बाद, आपके ईथेरियम मेननेट संस्करण को पुराना मान दिया जाएगा। अगर आप मुख्यनेट पर वापस जाना चाहते हैं, तो आपको पुनः डिप्लॉय और प्रकाशित करने की आवश्यकता होगी। हालांकि, वापस ईथेरियम मेननेट पर लौटने को मजबूरी से अनुशंसित किया जाता है क्योंकि सूचीकरण रिवॉर्ड आखिरकार पूरी तरह से आर्बिट्रम वन पर ही वितरित किए जाएंगे। +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. 
However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. ### मेरे स्थानांतरण को पूरा करने के लिए मुझे ब्रिज़्ड ईथ की आवश्यकता क्यों है? @@ -206,19 +206,19 @@ The tokens that are being undelegated are "locked" and therefore cannot be trans \*यदि आवश्यक हो - अर्थात्, आप एक कॉन्ट्रैक्ट पते का उपयोग कर रहे हैं | -### मी क्युरेट केलेला सबग्राफ L2 वर गेला असल्यास मला कसे कळेल? +### How will I know if the Subgraph I curated has moved to L2? -सबग्राफ विवरण पृष्ठ को देखते समय, एक बैनर आपको सूचित करेगा कि यह सबग्राफ अंतरण किया गया है। आप प्रोंप्ट का पालन करके अपने क्यूरेशन को अंतरण कर सकते हैं। आप इस जानकारी को भी उन सभी सबग्राफों के विवरण पृष्ठ पर पा सकते हैं जिन्होंने अंतरण किया है। +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. ### अगर मैं अपनी संरचना को L2 में स्थानांतरित करना नहीं चाहता हूँ तो क्या होगा? -जब एक सबग्राफ पुराना होता है, तो आपके पास सिग्नल वापस लेने का विकल्प होता है। उसी तरह, अगर कोई सबग्राफ L2 पर चल रहा है, तो आपको चुनने का विकल्प होता है कि क्या आप ईथेरियम मेननेट से सिग्नल वापस लेना चाहेंगे या सिग्नल को L2 पर भेजें। +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. ### माझे क्युरेशन यशस्वीरित्या हस्तांतरित झाले हे मला कसे कळेल? एल2 स्थानांतरण उपकरण को प्रारंभ करने के बाद, सिग्नल विवरण एक्सप्लोरर के माध्यम से लगभग 20 मिनट के बाद उपलब्ध होंगे। -### क्या मैं एक समय पर एक से अधिक सबग्राफ पर अपनी संरचना को स्थानांतरित कर सकता हूँ? +### Can I transfer my curation on more than one Subgraph at a time? वर्तमान में कोई थोक स्थानांतरण विकल्प उपलब्ध नहीं है। @@ -266,7 +266,7 @@ L2 ट्रान्स्फर टूलला तुमचा स्टे ### मी माझा हिस्सा हस्तांतरित करण्यापूर्वी मला आर्बिट्रमवर इंडेक्स करावे लागेल का? -आप पहले ही अपने स्टेक को प्रभावी रूप से हस्तांतरित कर सकते हैं, लेकिन आप L2 पर किसी भी पुरस्कार का दावा नहीं कर पाएंगे जब तक आप L2 पर सबग्राफ्स को आवंटित नहीं करते हैं, उन्हें इंडेक्स करते हैं, और पॉइंट ऑफ इंटरेस्ट (POI) प्रस्तुत नहीं करते। +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. ### मी माझा इंडेक्सिंग स्टेक हलवण्यापूर्वी प्रतिनिधी त्यांचे प्रतिनिधी हलवू शकतात का? From 3fbb7fcff42b6cbcafab4c854dc4c922ee7a6d4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:41 -0500 Subject: [PATCH 0315/1789] New translations l2-transfer-tools-faq.mdx (Swahili) --- .../arbitrum/l2-transfer-tools-faq.mdx | 414 ++++++++++++++++++ 1 file changed, 414 insertions(+) create mode 100644 website/src/pages/sw/archived/arbitrum/l2-transfer-tools-faq.mdx diff --git a/website/src/pages/sw/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/sw/archived/arbitrum/l2-transfer-tools-faq.mdx new file mode 100644 index 000000000000..7edde3d0cbcd --- /dev/null +++ b/website/src/pages/sw/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -0,0 +1,414 @@ +--- +title: L2 Transfer Tools FAQ +--- + +## General + +### What are L2 Transfer Tools? + +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. 
The L2 Transfer Tools were created by core devs to make it easy to move to L2. + +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. + +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. + +### Can I use the same wallet I use on Ethereum mainnet? + +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. + +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. + +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. + +### What happens if I don’t finish my transfer in 7 days? + +The L2 Transfer Tools use Arbitrum’s native mechanism to send messages from L1 to L2. This mechanism is called a “retryable ticket” and is used by all native token bridges, including the Arbitrum GRT bridge. You can read more about retryable tickets in the [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). + +When you transfer your assets (Subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). + +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (Subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. 
If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. + +### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? + +If you don't see a banner on your profile asking you to finish the transfer, then it's likely the transaction made it safely to L2 and no more action is needed. If in doubt, you can check if Explorer shows your delegation, stake or curation on Arbitrum One. + +If you have the L1 transaction hash (which you can find by looking at the recent transactions in your wallet), you can also confirm if the "retryable ticket" that carried the message to L2 was redeemed here: https://retryable-dashboard.arbitrum.io/ - if the auto-redeem failed, you can also connect your wallet there and redeem it. Rest assured that core devs are also monitoring for messages that get stuck, and will attempt to redeem them before they expire. + +## Subgraph Transfer + +### How do I transfer my Subgraph? + + + +To transfer your Subgraph, you will need to complete the following steps: + +1. Initiate the transfer on Ethereum mainnet + +2. Wait 20 minutes for confirmation + +3. Confirm Subgraph transfer on Arbitrum\* + +4. Finish publishing Subgraph on Arbitrum + +5. Update Query URL (recommended) + +\*Note that you must confirm the transfer within 7 days otherwise your Subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### Where should I initiate my transfer from? + +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any Subgraph details page. Click the "Transfer Subgraph" button in the Subgraph details page to start the transfer. + +### How long do I need to wait until my Subgraph is transferred + +The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. + +### Will my Subgraph still be discoverable after I transfer it to L2? + +Your Subgraph will only be discoverable on the network it is published to. For example, if your Subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 Subgraph will appear as deprecated. + +### Does my Subgraph need to be published to transfer it? + +To take advantage of the Subgraph transfer tool, your Subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the Subgraph. If your Subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published Subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 
1 GRT) from that account; make sure to choose "auto-migrating" signal. + +### What happens to the Ethereum mainnet version of my Subgraph after I transfer to Arbitrum? + +After transferring your Subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. + +### After I transfer, do I also need to re-publish on Arbitrum? + +After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. + +### Will my endpoint experience downtime while re-publishing? + +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the Subgraph on L1 and whether they keep indexing it until the Subgraph is fully supported on L2. + +### Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? + +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the Subgraph. + +### Will my Subgraph's curation move with my Subgraph? + +If you've chosen auto-migrating signal, 100% of your own curation will move with your Subgraph to Arbitrum One. All of the Subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 Subgraph. + +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. + +### Can I move my Subgraph back to Ethereum mainnet after I transfer? + +Once transferred, your Ethereum mainnet version of this Subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. + +### Why do I need bridged ETH to complete my transfer? + +Gas fees on Arbitrum One are paid using bridged ETH (i.e. ETH that has been bridged to Arbitrum One). However, the gas fees are significantly lower when compared to Ethereum mainnet. + +## Delegation + +### How do I transfer my delegation? + + + +To transfer your delegation, you will need to complete the following steps: + +1. Initiate delegation transfer on Ethereum mainnet +2. Wait 20 minutes for confirmation +3. Confirm delegation transfer on Arbitrum + +\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? 
+ +If the Indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the Indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your Indexer(s), consider discussing with them to find the best time to do your transfer. + +### What happens if the Indexer I currently delegate to isn't on Arbitrum One? + +The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. + +### Do Delegators have the option to delegate to another Indexer? + +If you wish to delegate to another Indexer, you can transfer to the same Indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. + +### What if I can't find the Indexer I'm delegating to on L2? + +The L2 transfer tool will automatically detect the Indexer you previously delegated to. + +### Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? + +The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. + +### Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? + +The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. + +### Can my rewards be negatively impacted if I do not transfer my delegation? + +It is anticipated that all network participation will move to Arbitrum One in the future. + +### How long does it take to complete the transfer of my delegation to L2? + +A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? + +Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. + +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? + +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. 
This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. + +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. + +### Is there any delegation tax? + +No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. + +### Will my unrealized rewards be transferred when I transfer my delegation? + +​Yes! The only rewards that can't be transferred are the ones for open allocations, as those won't exist until the Indexer closes the allocations (usually every 28 days). If you've been delegating for a while, this is likely only a small fraction of rewards. + +At the smart contract level, unrealized rewards are already part of your delegation balance, so they will be transferred when you transfer your delegation to L2. ​ + +### Is moving delegations to L2 mandatory? Is there a deadline? + +​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ + +### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? + +​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. + +Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ + +### I don't see a button to transfer my delegation. Why is that? + +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. + +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? + +​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? + +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. + +The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. + +## Curation Signal + +### How do I transfer my curation? + +To transfer your curation, you will need to complete the following steps: + +1. 
Initiate signal transfer on Ethereum mainnet + +2. Specify an L2 Curator address\* + +3. Wait 20 minutes for confirmation + +\*If necessary - i.e. you are using a contract address. + +### How will I know if the Subgraph I curated has moved to L2? + +When viewing the Subgraph details page, a banner will notify you that this Subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the Subgraph details page of any Subgraph that has moved. + +### What if I do not wish to move my curation to L2? + +When a Subgraph is deprecated you have the option to withdraw your signal. Similarly, if a Subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. + +### How do I know my curation successfully transferred? + +Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. + +### Can I transfer my curation on more than one Subgraph at a time? + +There is no bulk transfer option at this time. + +## Indexer Stake + +### How do I transfer my stake to Arbitrum? + +> Disclaimer: If you are currently unstaking any portion of your GRT on your Indexer, you will not be able to use L2 Transfer Tools. + + + +To transfer your stake, you will need to complete the following steps: + +1. Initiate stake transfer on Ethereum mainnet + +2. Wait 20 minutes for confirmation + +3. Confirm stake transfer on Arbitrum + +\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### Will all of my stake transfer? + +You can choose how much of your stake to transfer. If you choose to transfer all of your stake at once, you will need to close any open allocations first. + +If you plan on transferring parts of your stake over multiple transactions, you must always specify the same beneficiary address. + +Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. + +### How much time do I have to confirm my stake transfer to Arbitrum? + +\*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. + +### What if I have open allocations? + +If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. + +### Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? + +No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. + +### How long will it take to transfer my stake? 
+ +It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. + +### Do I have to index on Arbitrum before I transfer my stake? + +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to Subgraphs on L2, index them, and present POIs. + +### Can Delegators move their delegation before I move my indexing stake? + +No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. + +### Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? + +Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. + +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? + +​Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ + +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? + +​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. + +## Vesting Contract Transfer + +### How do I transfer my vesting contract? + +To transfer your vesting, you will need to complete the following steps: + +1. Initiate the vesting transfer on Ethereum mainnet + +2. Wait 20 minutes for confirmation + +3. Confirm vesting transfer on Arbitrum + +### How do I transfer my vesting contract if I am only partially vested? + + + +1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) + +2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock. This will also set their L2 beneficiary address. + +3. Send their stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. + +4. Withdraw any remaining ETH from the transfer tool contract + +### How do I transfer my vesting contract if I am fully vested? + + + +For those that are fully vested, the process is similar: + +1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) + +2. Set your L2 address with a call to the transfer tool contract + +3. Send your stake/delegation to L2 through the "locked" transfer tool functions in the L1 Staking contract. + +4. Withdraw any remaining ETH from the transfer tool contract + +### Can I transfer my vesting contract to Arbitrum? + +You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). + +When you transfer GRT from your L1 vesting contract to L2, you can choose the amount to send and you can do this as many times as you like. The L2 vesting contract will be initialized the first time you transfer GRT. 
+ +The transfers are done using a Transfer Tool that will be visible on your Explorer profile when you connect with the vesting contract account. + +Please note that you will not be able to release/withdraw GRT from the L2 vesting contract until the end of your vesting timeline when your contract is fully vested. If you need to release GRT before then, you can transfer the GRT back to the L1 vesting contract using another transfer tool that is available for that purpose. + +If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. + +### I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? + +Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. + +### I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? + +Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. + +### Can I specify a different beneficiary for my vesting contract on L2? + +Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. + +If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. + +### My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? + +Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. + +This allows you to transfer your stake or delegation to any L2 address. + +### My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? + +These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. + +To transfer your vesting contract to L2, you will send any GRT balance to L2 using the transfer tools, which will initialize your L2 vesting contract: + +1. Deposit some ETH into the transfer tool contract (this will be used to pay for L2 gas) + +2. Revoke protocol access to the vesting contract (needed for the next step) + +3. 
Give protocol access to the vesting contract (this will allow your contract to interact with the transfer tool)

4. Specify an L2 beneficiary address\* and initiate the balance transfer on Ethereum mainnet

5. Wait 20 minutes for confirmation

6. Confirm the balance transfer on L2

\*If necessary - i.e. you are using a contract address.

**Note**: You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, support is available: contact support@thegraph.com or ask on [Discord](https://discord.gg/graphprotocol).

### My vesting contract shows 0 GRT, so I cannot transfer it. Why is this, and how do I fix it?

To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT.

If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange).

### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2. What do I do?

If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there.

When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile.

### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically?

No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately.

You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract.

### Can I move my vesting contract back to L1?

There is no need to do so, because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two.

### Why do I need to move my vesting contract to begin with?

You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract.

### What happens if I try to cash out my contract when it is only partially vested? Is this possible?

No, this is not possible. You can move funds back to L1 and withdraw them there.

### What if I don't want to move my vesting contract to L2?

You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum.
Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. From bd828035d1e5022c6deb922590d833bad3696b0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:42 -0500 Subject: [PATCH 0316/1789] New translations l2-transfer-tools-guide.mdx (Romanian) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/ro/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/ro/archived/arbitrum/l2-transfer-tools-guide.mdx index 549618bfd7c3..4a34da9bad0e 100644 --- a/website/src/pages/ro/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/ro/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ The Graph has made it easy to move to L2 on Arbitrum One. For each protocol part Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## How to transfer your subgraph to Arbitrum (L2) +## How to transfer your Subgraph to Arbitrum (L2) -## Benefits of transferring your subgraphs +## Benefits of transferring your Subgraphs The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. -When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. 
-## Understanding what happens with signal, your L1 subgraph and query URLs +## Understanding what happens with signal, your L1 Subgraph and query URLs -Transferring a subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the subgraph to L2. The "transfer" will deprecate the subgraph on mainnet and send the information to re-create the subgraph on L2 using the bridge. It will also include the subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -When you choose to transfer the subgraph, this will convert all of the subgraph's curation signal to GRT. This is equivalent to "deprecating" the subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the subgraph, where they will be used to mint signal on your behalf. +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. If a subgraph owner does not transfer their subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -As soon as the subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the subgraph. However, there will be Indexers that will 1) keep serving transferred subgraphs for 24 hours, and 2) immediately start indexing the subgraph on L2. Since these Indexers already have the subgraph indexed, there should be no need to wait for the subgraph to sync, and it will be possible to query the L2 subgraph almost immediately. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. -Queries to the L2 subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. 
+Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## Choosing your L2 wallet -When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -When transferring the subgraph to Arbitrum, you can choose a different wallet that will own this subgraph NFT on L2. +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same owner address as in L1. -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph. +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the subgraph will be lost and cannot be recovered.** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## Preparing for the transfer: bridging some ETH -Transferring the subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. 
However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Since gas fees on Arbitrum are lower, you should only need a small amount. It is recommended that you start at a low threshold (0.e.g. 01 ETH) for your transaction to be approved. -## Finding the subgraph Transfer Tool +## Finding the Subgraph Transfer Tool -You can find the L2 Transfer Tool when you're looking at your subgraph's page on Subgraph Studio: +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![transfer tool](/img/L2-transfer-tool1.png) -It is also available on Explorer if you're connected with the wallet that owns a subgraph and on that subgraph's page on Explorer: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Transferring to L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ Clicking on the Transfer to L2 button will open the transfer tool where you can ## Step 1: Starting the transfer -Before starting the transfer, you must decide which address will own the subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). -Also please note transferring the subgraph requires having a nonzero amount of signal on the subgraph with the same account that owns the subgraph; if you haven't signaled on the subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 subgraph (see "Understanding what happens with signal, your L1 subgraph and query URLs" above for more details on what goes on behind the scenes). +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. 
Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Start the transfer to L2](/img/startTransferL2.png) -## Step 2: Waiting for the subgraph to get to L2 +## Step 2: Waiting for the Subgraph to get to L2 -After you start the transfer, the message that sends your L1 subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. @@ -80,7 +80,7 @@ Once this wait time is over, Arbitrum will attempt to auto-execute the transfer ## Step 3: Confirming the transfer -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your subgraph to L2 will be pending and require a retry within 7 days. +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. @@ -88,33 +88,33 @@ If this is the case, you will need to connect using an L2 wallet that has some E ## Step 4: Finishing the transfer on L2 -At this point, your subgraph and GRT have been received on Arbitrum, but the subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." 
+At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." -![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -This will publish the subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## Step 5: Updating the query URL -Your subgraph has been successfully transferred to Arbitrum! To query the subgraph, the new URL will be : +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be : `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Note that the subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the subgraph has been synced on L2. +Note that the Subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## How to transfer your curation to Arbitrum (L2) -## Understanding what happens to curation on subgraph transfers to L2 +## Understanding what happens to curation on Subgraph transfers to L2 -When the owner of a subgraph transfers a subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph. +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. 
When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph. -A fraction of these GRT corresponding to the subgraph owner is sent to L2 together with the subgraph. +A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph. -At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. ## Choosing your L2 wallet @@ -130,9 +130,9 @@ If you're using a smart contract wallet, like a multisig (e.g. a Safe), then cho Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough. -If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph. +If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph. -When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. +When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. ![Transfer signal](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ If this is the case, you will need to connect using an L2 wallet that has some E ## Withdrawing your curation on L1 -If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. 
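A note on the "Step 5: Updating the query URL" content in the guide above: once a transferred Subgraph has synced on L2, switching clients is just a matter of pointing them at the new gateway URL. The sketch below is a minimal check, assuming a Node 18+ or browser environment with a global `fetch`; the API key and Subgraph ID are placeholders, and `_meta` is the built-in metadata field exposed by graph-node deployments.

```typescript
// Minimal sketch: confirm a transferred Subgraph is reachable and synced at
// its new Arbitrum gateway URL before switching production queries over.
const API_KEY = "<api-key>"; // placeholder: your Studio API key
const L2_SUBGRAPH_ID = "<l2-subgraph-id>"; // placeholder: the new ID on L2
const url = `https://arbitrum-gateway.thegraph.com/api/${API_KEY}/subgraphs/id/${L2_SUBGRAPH_ID}`;

// `_meta` reports the latest indexed block and whether indexing errors exist.
const query = `{ _meta { block { number } hasIndexingErrors } }`;

async function checkL2Subgraph(): Promise<void> {
  const res = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query }),
  });
  const { data, errors } = await res.json();
  if (errors) throw new Error(JSON.stringify(errors));
  console.log("L2 Subgraph synced to block", data._meta.block.number);
  console.log("Indexing errors:", data._meta.hasIndexingErrors);
}

checkL2Subgraph().catch(console.error);
```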
From eb8f9f0e452286c66c29155e43b491ede341b255 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:43 -0500 Subject: [PATCH 0317/1789] New translations l2-transfer-tools-guide.mdx (French) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/fr/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/fr/archived/arbitrum/l2-transfer-tools-guide.mdx index 6d59607442b4..d6014f6d5dac 100644 --- a/website/src/pages/fr/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/fr/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ The Graph a facilité le passage à L2 sur Arbitrum One. Pour chaque participant Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## Comment transférer votre subgraph vers Arbitrum (L2) +## How to transfer your Subgraph to Arbitrum (L2) -## Avantages du transfert de vos subgraphs +## Benefits of transferring your Subgraphs La communauté et les développeurs du Graph se sont préparés (https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) à passer à Arbitrum au cours de l'année écoulée. Arbitrum, une blockchain de couche 2 ou "L2", hérite de la sécurité d'Ethereum mais offre des frais de gaz considérablement réduits. -Lorsque vous publiez ou mettez à niveau votre subgraph sur The Graph Network, vous interagissez avec des contrats intelligents sur le protocole, ce qui nécessite de payer le gaz avec ETH. En déplaçant vos subgraphs vers Arbitrum, toute mise à jour future de votre subgraph nécessitera des frais de gaz bien inférieurs. Les frais inférieurs et le fait que les courbes de liaison de curation sur L2 soient plates facilitent également la curation pour les autres conservateurs sur votre subgraph, augmentant ainsi les récompenses des indexeurs sur votre subgraph. Cet environnement moins coûteux rend également moins cher pour les indexeurs l'indexation et la diffusion de votre subgraph. Les récompenses d'indexation augmenteront sur Arbitrum et diminueront sur le réseau principal Ethereum au cours des prochains mois, de sorte que de plus en plus d'indexeurs transféreront leur participation et établiront leurs opérations sur L2. +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. 
-## Comprendre ce qui se passe avec le signal, votre subgraph L1 et les URL de requête +## Understanding what happens with signal, your L1 Subgraph and query URLs -Le transfert d'un subgraph vers Arbitrum utilise le pont GRT sur Arbitrum, qui à son tour utilise le pont natif d'Arbitrum, pour envoyer le subgraph vers L2. Le 'transfert' va déprécier le subgraph sur le mainnet et envoyer les informations pour recréer le subgraph sur L2 en utilisant le pont. Il inclura également les GRT signalés par le propriétaire du subgraph, qui doivent être supérieurs à zéro pour que le pont accepte le transfert. +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -Lorsque vous choisissez de transférer le subgraph, cela convertira tous les signaux de curation du subgraph en GRT. Cela équivaut à "déprécier" le subgraph sur le mainnet. Les GRT correspondant à votre curation seront envoyés à L2 avec le subgraph, où ils seront utilisés pour monnayer des signaux en votre nom. +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -Les autres curateurs peuvent choisir de retirer leur fraction de GRT ou de la transférer également à L2 pour le signal de monnayage sur le même subgraph. Si un propriétaire de subgraph ne transfère pas son subgraph à L2 et le déprécie manuellement via un appel de contrat, les curateurs en seront informés et pourront retirer leur curation. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -Dès que le subgraph est transféré, puisque toute la curation est convertie en GRT, les indexeurs ne recevront plus de récompenses pour l'indexation du subgraph. Cependant, certains indexeurs 1) continueront à servir les subgraphs transférés pendant 24 heures et 2) commenceront immédiatement à indexer le subgraph sur L2. Comme ces indexeurs ont déjà indexé le subgraph, il ne devrait pas être nécessaire d'attendre la synchronisation du subgraph, et il sera possible d'interroger le subgraph L2 presque immédiatement. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. -Les requêtes vers le subgraph L2 devront être effectuées vers une URL différente (sur `arbitrum-gateway.thegraph.com`), mais l'URL L1 continuera à fonctionner pendant au moins 48 heures. 
Après cela, la passerelle L1 transmettra les requêtes à la passerelle L2 (pendant un certain temps), mais cela augmentera la latence. Il est donc recommandé de basculer toutes vos requêtes vers la nouvelle URL dès que possible. +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## Choisir son portefeuille L2 -Lorsque vous avez publié votre subgraph sur le mainnet, vous avez utilisé un portefeuille connecté pour créer le subgraph, et ce portefeuille possède le NFT qui représente ce subgraph et vous permet de publier des mises à jour. +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -Lors du transfert du subgraph vers Arbitrum, vous pouvez choisir un autre portefeuille qui possédera ce subgraph NFT sur L2. +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. Si vous utilisez un portefeuille "normal" comme MetaMask (un Externally Owned Account ou EOA, c'est-à-dire un portefeuille qui n'est pas un smart contract), cette étape est facultative et il est recommandé de conserver la même adresse de propriétaire que dans L1.portefeuille. -Si vous utilisez un portefeuille de smart contrat, comme un multisig (par exemple un Safe), alors choisir une adresse de portefeuille L2 différente est obligatoire, car il est très probable que ce compte n'existe que sur le mainnet et vous ne pourrez pas faire de transactions sur Arbitrum en utilisant ce portefeuille. Si vous souhaitez continuer à utiliser un portefeuille de contrat intelligent ou un multisig, créez un nouveau portefeuille sur Arbitrum et utilisez son adresse comme propriétaire L2 de votre subgraph. +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**Il est très important d'utiliser une adresse de portefeuille que vous contrôlez, et qui peut effectuer des transactions sur Arbitrum. Dans le cas contraire, le subgraph sera perdu et ne pourra pas être récupéré** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## Préparer le transfert : faire le pont avec quelques EPF -Le transfert du subgraph implique l'envoi d'une transaction à travers le pont, puis l'exécution d'une autre transaction sur Arbitrum. La première transaction utilise de l'ETH sur le mainnet, et inclut de l'ETH pour payer le gaz lorsque le message est reçu sur L2. Cependant, si ce gaz est insuffisant, vous devrez réessayer la transaction et payer le gaz directement sur L2 (c'est "l'étape 3 : Confirmer le transfert" ci-dessous). Cette étape **doit être exécutée dans les 7 jours suivant le début du transfert**. 
De plus, la deuxième transaction ("Etape 4 : Terminer le transfert sur L2") se fera directement sur Arbitrum. Pour ces raisons, vous aurez besoin de quelques ETH sur un portefeuille Arbitrum. Si vous utilisez un compte multisig ou smart contract, l'ETH devra être dans le portefeuille régulier (EOA) que vous utilisez pour exécuter les transactions, et non sur le portefeuille multisig lui-même. +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. Vous pouvez acheter de l'ETH sur certains échanges et le retirer directement sur Arbitrum, ou vous pouvez utiliser le pont Arbitrum pour envoyer de l'ETH d'un portefeuille du mainnet vers L2 : [bridge.arbitrum.io](http://bridge.arbitrum.io). Étant donné que les frais de gaz sur Arbitrum sont moins élevés, vous ne devriez avoir besoin que d'une petite quantité. Il est recommandé de commencer par un seuil bas (0,par exemple 01 ETH) pour que votre transaction soit approuvée. -## Trouver l'outil de transfert de subgraph +## Finding the Subgraph Transfer Tool -Vous pouvez trouver l'outil de transfert L2 lorsque vous consultez la page de votre subgraph dans le Subgraph Studio : +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![outil de transfert](/img/L2-transfer-tool1.png) -Elle est également disponible sur Explorer si vous êtes connecté au portefeuille qui possède un subgraph et sur la page de ce subgraph sur Explorer : +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Transfert vers L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ En cliquant sur le bouton Transférer vers L2, vous ouvrirez l'outil de transfer ## Étape 1 : Démarrer le transfert -Avant de commencer le transfert, vous devez décider quelle adresse sera propriétaire du subgraph sur L2 (voir "Choisir votre portefeuille L2" ci-dessus), et il est fortement recommandé d'avoir quelques ETH pour le gaz déjà bridgé sur Arbitrum (voir "Préparer le transfert : brider quelques ETH" ci-dessus). +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). -Veuillez également noter que le transfert du subgraph nécessite d'avoir un montant de signal non nul sur le subgraph avec le même compte qui possède le subgraph ; si vous n'avez pas signalé sur le subgraph, vous devrez ajouter un peu de curation (ajouter un petit montant comme 1 GRT suffirait). 
+Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -Après avoir ouvert l'outil de transfert, vous pourrez saisir l'adresse du portefeuille L2 dans le champ "Adresse du portefeuille destinataire" - **assurez-vous que vous avez saisi la bonne adresse ici**. En cliquant sur Transférer le subgraph, vous serez invité à exécuter la transaction sur votre portefeuille (notez qu'une certaine valeur ETH est incluse pour payer le gaz L2) ; cela lancera le transfert et dépréciera votre subgraph L1 (voir "Comprendre ce qui se passe avec le signal, votre subgraph L1 et les URL de requête" ci-dessus pour plus de détails sur ce qui se passe en coulisses). +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -Si vous exécutez cette étape, **assurez-vous de continuer jusqu'à terminer l'étape 3 en moins de 7 jours, sinon le subgraph et votre signal GRT seront perdus.** Cela est dû au fonctionnement de la messagerie L1-L2 sur Arbitrum : les messages qui sont envoyés via le pont sont des « tickets réessayables » qui doivent être exécutés dans les 7 jours, et l'exécution initiale peut nécessiter une nouvelle tentative s'il y a des pics dans le prix du gaz sur Arbitrum. +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Démarrer le transfert vers la L2](/img/startTransferL2.png) -## Étape 2 : Attendre que le subgraphe atteigne L2 +## Step 2: Waiting for the Subgraph to get to L2 -Après avoir lancé le transfert, le message qui envoie votre subgraph de L1 à L2 doit se propager à travers le pont Arbitrum. Cela prend environ 20 minutes (le pont attend que le bloc du réseau principal contenant la transaction soit "sûr" face aux potentielles réorganisations de la chaîne). +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). Une fois ce temps d'attente terminé, le réseau Arbitrum tentera d'exécuter automatiquement le transfert sur les contrats L2. @@ -80,7 +80,7 @@ Une fois ce temps d'attente terminé, le réseau Arbitrum tentera d'exécuter au ## Étape 3 : Confirmer le transfert -Dans la plupart des cas, cette étape s'exécutera automatiquement car le gaz L2 inclus dans l'étape 1 devrait être suffisant pour exécuter la transaction qui reçoit le subgraph sur les contrats Arbitrum. 
Cependant, dans certains cas, il est possible qu'une hausse soudaine des prix du gaz sur Arbitrum entraîne l'échec de cette exécution automatique. Dans ce cas, le "ticket" qui envoie votre subgraphe vers L2 sera en attente et nécessitera une nouvelle tentative dans les 7 jours. +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. Si c'est le cas, vous devrez vous connecter en utilisant un portefeuille L2 qui contient de l'ETH sur Arbitrum, changer le réseau de votre portefeuille vers Arbitrum, et cliquer sur "Confirmer le transfert" pour retenter la transaction. @@ -88,33 +88,33 @@ Si c'est le cas, vous devrez vous connecter en utilisant un portefeuille L2 qui ## Étape 4 : Terminer le transfert sur L2 -À ce stade, votre subgraph et vos GRT ont été reçus sur Arbitrum, mais le subgraph n'est pas encore publié. Vous devrez vous connecter à l'aide du portefeuille L2 que vous avez choisi comme portefeuille de réception, basculer votre réseau de portefeuille sur Arbitrum et cliquer sur « Publier le subgraph.» +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." -![Publier le subgraph](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Attendez que le subgraph soit publié](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -Cela permettra de publier le subgraph afin que les indexeurs opérant sur Arbitrum puissent commencer à le servir. Il va également modifier le signal de curation en utilisant les GRT qui ont été transférés de L1. +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## Étape 5 : Mise à jour de l'URL de la requête -Votre subgraph a été transféré avec succès vers Arbitrum ! Pour interroger le subgraph, la nouvelle URL sera : +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be : https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]\` -Notez que l'ID du subgraph sur Arbitrum sera différent de celui que vous aviez sur le mainnet, mais vous pouvez toujours le trouver sur Explorer ou Studio. Comme mentionné ci-dessus (voir "Comprendre ce qui se passe avec le signal, votre subgraph L1 et les URL de requête"), l'ancienne URL L1 sera prise en charge pendant une courte période, mais vous devez basculer vos requêtes vers la nouvelle adresse dès que le subgraph aura été synchronisé. sur L2. +Note that the Subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. 
## Comment transférer votre curation vers Arbitrum (L2) -## Comprendre ce qui arrive à la curation lors des transferts de subgraphs vers L2 +## Understanding what happens to curation on Subgraph transfers to L2 -Lorsque le propriétaire d'un subgraph transfère un subgraph vers Arbitrum, tout le signal du subgraph est converti en GRT en même temps. Cela s'applique au signal "auto-migré", c'est-à-dire au signal qui n'est pas spécifique à une version de subgraph ou à un déploiement, mais qui suit la dernière version d'un subgraph. +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -Cette conversion du signal en GRT est identique à ce qui se produirait si le propriétaire du subgraph dépréciait le subgraph en L1. Lorsque le subgraph est déprécié ou transféré, tout le signal de curation est "brûlé" simultanément (en utilisant la courbe de liaison de curation) et le GRT résultant est détenu par le contrat intelligent GNS (c'est-à-dire le contrat qui gère les mises à niveau des subgraphs et le signal auto-migré). Chaque Curateur de ce subgraph a donc droit à ce GRT de manière proportionnelle à la quantité de parts qu'il détenait pour le subgraph. +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph. -Une fraction de ces GRT correspondant au propriétaire du subgraph est envoyée à L2 avec le subgraph. +A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph. -À ce stade, le GRT organisé n'accumulera plus de frais de requête, les conservateurs peuvent donc choisir de retirer leur GRT ou de le transférer vers le même subgraph sur L2, où il pourra être utilisé pour créer un nouveau signal de curation. Il n'y a pas d'urgence à le faire car le GRT peut être utile indéfiniment et chacun reçoit un montant proportionnel à ses actions, quel que soit le moment où il le fait. +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. ## Choisir son portefeuille L2 @@ -130,9 +130,9 @@ Si vous utilisez un portefeuille de contrat intelligent, comme un multisig (par Avant de commencer le transfert, vous devez décider quelle adresse détiendra la curation sur L2 (voir "Choisir votre portefeuille L2" ci-dessus), et il est recommandé d'avoir des ETH pour le gaz déjà pontés sur Arbitrum au cas où vous auriez besoin de réessayer l'exécution du message sur L2. 
Vous pouvez acheter de l'ETH sur certaines bourses et le retirer directement sur Arbitrum, ou vous pouvez utiliser le pont Arbitrum pour envoyer de l'ETH depuis un portefeuille du mainnet vers L2 : [bridge.arbitrum.io](http://bridge.arbitrum.io) - étant donné que les frais de gaz sur Arbitrum sont si bas, vous ne devriez avoir besoin que d'un petit montant, par ex. 0,01 ETH sera probablement plus que suffisant. -Si un subgraph que vous organisez a été transféré vers L2, vous verrez un message sur l'Explorateur vous indiquant que vous organisez un subgraph transféré. +If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph. -En consultant la page du subgraph, vous pouvez choisir de retirer ou de transférer la curation. En cliquant sur "Transférer le signal vers Arbitrum", vous ouvrirez l'outil de transfert. +When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. Signal de transfert](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ Si c'est le cas, vous devrez vous connecter en utilisant un portefeuille L2 qui ## Retrait de la curation sur L1 -Si vous préférez ne pas envoyer votre GRT vers L2, ou si vous préférez combler le GRT manuellement, vous pouvez retirer votre GRT organisé sur L1. Sur la bannière de la page du subgraph, choisissez « Retirer le signal » et confirmez la transaction ; le GRT sera envoyé à votre adresse de conservateur. +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. From cc3e16da2cb20cec6c21864030d78617d6c194b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:45 -0500 Subject: [PATCH 0318/1789] New translations l2-transfer-tools-guide.mdx (Spanish) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/es/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/es/archived/arbitrum/l2-transfer-tools-guide.mdx index 4ec61fdc3a7c..3d0d90acb9a9 100644 --- a/website/src/pages/es/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/es/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ The Graph ha facilitado la migración a L2 en Arbitrum One. Para cada participan Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## Cómo transferir tu subgrafo a Arbitrum (L2) +## How to transfer your Subgraph to Arbitrum (L2) -## Beneficios de transferir tus subgrafos +## Benefits of transferring your Subgraphs La comunidad de The Graph y los core devs se han [estado preparando] (https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) para migrar a Arbitrum durante el último año. Arbitrum, una blockchain de capa 2 o "L2", hereda la seguridad de Ethereum pero ofrece tarifas de gas considerablemente más bajas. -Cuando publicas o actualizas tus subgrafos en The Graph Network, estás interactuando con contratos inteligentes en el protocolo, lo cual requiere pagar por gas utilizando ETH. 
Al mover tus subgrafos a Arbitrum, cualquier actualización futura de tu subgrafo requerirá tarifas de gas mucho más bajas. Las tarifas más bajas, y el hecho de que las bonding curves de curación en L2 son planas, también facilitan que otros Curadores realicen curación en tu subgrafo, aumentando las recompensas para los Indexadores en tu subgrafo. Este contexto con tarifas más económicas también hace que sea más barato para los Indexadores indexar y servir tu subgrafo. Las recompensas por indexación aumentarán en Arbitrum y disminuirán en Ethereum mainnet en los próximos meses, por lo que cada vez más Indexadores transferirán su stake y establecerán sus operaciones en L2. +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. -## Comprensión de lo que sucede con la señal, tu subgrafo de L1 y las URL de consulta +## Understanding what happens with signal, your L1 Subgraph and query URLs -Transferir un subgrafo a Arbitrum utiliza el puente de GRT de Arbitrum, que a su vez utiliza el puente nativo de Arbitrum para enviar el subgrafo a L2. La "transferencia" deprecará el subgrafo en mainnet y enviará la información para recrear el subgrafo en L2 utilizando el puente. También incluirá el GRT señalizado del propietario del subgrafo, el cual debe ser mayor que cero para que el puente acepte la transferencia. +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -Cuando eliges transferir el subgrafo, esto convertirá toda la señal de curación del subgrafo a GRT. Esto equivale a "deprecar" el subgrafo en mainnet. El GRT correspondiente a tu curación se enviará a L2 junto con el subgrafo, donde se utilizarán para emitir señal en tu nombre. +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -Otros Curadores pueden elegir si retirar su fracción de GRT o también transferirlo a L2 para emitir señal en el mismo subgrafo. Si un propietario de subgrafo no transfiere su subgrafo a L2 y lo depreca manualmente a través de una llamada de contrato, entonces los Curadores serán notificados y podrán retirar su curación. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. 
If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -Tan pronto como se transfiera el subgrafo, dado que toda la curación se convierte en GRT, los Indexadores ya no recibirán recompensas por indexar el subgrafo. Sin embargo, habrá Indexadores que 1) continuarán sirviendo los subgrafos transferidos durante 24 horas y 2) comenzarán inmediatamente a indexar el subgrafo en L2. Dado que estos Indexadores ya tienen el subgrafo indexado, no será necesario esperar a que se sincronice el subgrafo y será posible realizar consultas al subgrafo en L2 casi de inmediato. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. -Las consultas al subgrafo en L2 deberán realizarse a una URL diferente (en `arbitrum-gateway.thegraph.com`), pero la URL de L1 seguirá funcionando durante al menos 48 horas. Después de eso, la gateway de L1 redirigirá las consultas a la gateway de L2 (durante algún tiempo), pero esto agregará latencia, por lo que se recomienda cambiar todas las consultas a la nueva URL lo antes posible. +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## Elección de tu wallet en L2 -Cuando publicaste tu subgrafo en mainnet, utilizaste una wallet conectada para crear el subgrafo, y esta wallet es la propietaria del NFT que representa este subgrafo y te permite publicar actualizaciones. +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -Al transferir el subgrafo a Arbitrum, puedes elegir una wallet diferente que será la propietaria del NFT de este subgrafo en L2. +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. Si estás utilizando una wallet "convencional" como MetaMask (una Cuenta de Propiedad Externa o EOA, es decir, una wallet que no es un contrato inteligente), esto es opcional y se recomienda para mantener la misma dirección del propietario que en L1. -Si estás utilizando una wallet de tipo smart contract, como una multisig (por ejemplo, una Safe), entonces elegir una dirección de wallet L2 diferente es obligatorio, ya que es muy probable que esta cuenta solo exista en mainnet y no podrás realizar transacciones en Arbitrum utilizando esta wallet. Si deseas seguir utilizando una wallet de tipo smart contract o multisig, crea una nueva wallet en Arbitrum y utiliza su dirección como propietario L2 de tu subgrafo. +If you're using a smart contract wallet, like a multisig (e.g. 
a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**Es muy importante utilizar una dirección de wallet que controles y que pueda realizar transacciones en Arbitrum. De lo contrario, el subgrafo se perderá y no podrá ser recuperado.** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## Preparándose para la transferencia: bridgeando algo de ETH -Transferir el subgrafo implica enviar una transacción a través del puente y luego ejecutar otra transacción en Arbitrum. La primera transacción utiliza ETH en la red principal e incluye cierta cantidad de ETH para pagar el gas cuando se recibe el mensaje en L2. Sin embargo, si este gas es insuficiente, deberás volver a intentar la transacción y pagar el gas directamente en L2 (esto es "Paso 3: Confirmando la transferencia" que se describe a continuación). Este paso **debe ejecutarse dentro de los 7 días desde el inicio de la transferencia**. Además, la segunda transacción ("Paso 4: Finalizando la transferencia en L2") se realizará directamente en Arbitrum. Por estas razones, necesitarás tener algo de ETH en una billetera de Arbitrum. Si estás utilizando una cuenta de firma múltiple o un contrato inteligente, el ETH debe estar en la billetera regular (EOA) que estás utilizando para ejecutar las transacciones, no en la billetera de firma múltiple en sí misma. +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. Puedes comprar ETH en algunos exchanges y retirarlo directamente a Arbitrum, o puedes utilizar el puente de Arbitrum para enviar ETH desde una billetera en la red principal a L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Dado que las tarifas de gas en Arbitrum son más bajas, solo necesitarás una pequeña cantidad. Se recomienda que comiences con un umbral bajo (por ejemplo, 0.01 ETH) para que tu transacción sea aprobada. 
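The paragraphs above recommend bridging a small amount of ETH (for example 0.01 ETH) to Arbitrum before starting. A quick way to confirm that the wallet you will use for the L2 steps already holds enough gas is to read its balance over a public Arbitrum RPC endpoint. This is a minimal sketch only, assuming ethers v6, the public `https://arb1.arbitrum.io/rpc` endpoint, and a placeholder wallet address:

```ts
// Minimal sketch (not part of the transfer tools): check that the wallet you
// will use on Arbitrum already holds enough ETH for L2 gas before starting.
// Assumptions: ethers v6, the public Arbitrum One RPC, a placeholder address.
import { JsonRpcProvider, formatEther } from "ethers";

const ARBITRUM_RPC = "https://arb1.arbitrum.io/rpc"; // public endpoint (assumption)
const WALLET = "0x0000000000000000000000000000000000000000"; // placeholder EOA

async function hasGasOnArbitrum(minEth = 0.01): Promise<boolean> {
  const provider = new JsonRpcProvider(ARBITRUM_RPC);
  const balanceWei = await provider.getBalance(WALLET); // balance in wei (bigint)
  const balanceEth = Number(formatEther(balanceWei));
  console.log(`Arbitrum balance: ${balanceEth} ETH`);
  return balanceEth >= minEth;
}

hasGasOnArbitrum().then((enough) => {
  if (!enough) console.log("Bridge a small amount of ETH via bridge.arbitrum.io first.");
});
```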
-## Encontrando la herramienta de transferencia del subgrafo +## Finding the Subgraph Transfer Tool -Puedes encontrar la herramienta de transferencia a L2 cuando estás viendo la página de tu subgrafo en Subgraph Studio: +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![transfer tool](/img/L2-transfer-tool1.png) -También está disponible en Explorer si estás conectado con la wallet que es propietaria de un subgrafo y en la página de ese subgrafo en Explorer: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Transferring to L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ Al hacer clic en el botón "Transferir a L2" se abrirá la herramienta de transf ## Paso 1: Iniciar la transferencia -Antes de iniciar la transferencia, debes decidir qué dirección será la propietaria del subgrafo en L2 (ver "Elección de tu wallet en L2" anteriormente), y se recomienda encarecidamente tener ETH para gas ya transferido a Arbitrum (ver "Preparando para la transferencia: transferir ETH" anteriormente). +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). -También ten en cuenta que la transferencia del subgrafo requiere tener una cantidad distinta de cero de señal en el subgrafo con la misma cuenta que es propietaria del subgrafo; si no has emitido señal en el subgrafo, deberás agregar un poco de curación (añadir una pequeña cantidad como 1 GRT sería suficiente). +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -Después de abrir la herramienta de transferencia, podrás ingresar la dirección de la wallet L2 en el campo "Dirección de la wallet receptora" - asegúrate de ingresar la dirección correcta aquí. Al hacer clic en "Transferir Subgrafo", se te pedirá que ejecutes la transacción en tu wallet (ten en cuenta que se incluye un valor de ETH para pagar el gas de L2); esto iniciará la transferencia y deprecará tu subgrafo de L1 (consulta "Comprensión de lo que sucede con la señal, tu subgrafo de L1 y las URL de consulta" anteriormente para obtener más detalles sobre lo que ocurre detrás de escena). +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -Si ejecutas este paso, **asegúrate de completar el paso 3 en menos de 7 días, o el subgrafo y tu GRT de señal se perderán**. Esto se debe a cómo funciona la mensajería de L1 a L2 en Arbitrum: los mensajes que se envían a través del puente son "tickets reintentables" que deben ejecutarse dentro de los 7 días, y la ejecución inicial puede requerir un reintento si hay picos en el precio del gas en Arbitrum. 
+If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Start the transfer to L2](/img/startTransferL2.png) -## Paso 2: Esperarando a que el subgrafo llegue a L2 +## Step 2: Waiting for the Subgraph to get to L2 -Después de iniciar la transferencia, el mensaje que envía tu subgrafo de L1 a L2 debe propagarse a través del puente de Arbitrum. Esto tarda aproximadamente 20 minutos (el puente espera a que el bloque de mainnet que contiene la transacción sea "seguro" para evitar posibles reorganizaciones de la cadena). +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). Una vez que finalice este tiempo de espera, Arbitrum intentará ejecutar automáticamente la transferencia en los contratos de L2. @@ -80,7 +80,7 @@ Una vez que finalice este tiempo de espera, Arbitrum intentará ejecutar automá ## Paso 3: Confirmando la transferencia -En la mayoría de los casos, este paso se ejecutará automáticamente, ya que el gas de L2 incluido en el paso 1 debería ser suficiente para ejecutar la transacción que recibe el subgrafo en los contratos de Arbitrum. Sin embargo, en algunos casos, es posible que un aumento en el precio del gas en Arbitrum cause que esta autoejecución falle. En este caso, el "ticket" que envía tu subgrafo a L2 quedará pendiente y requerirá un reintento dentro de los 7 días. +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. Si este es el caso, deberás conectarte utilizando una wallet de L2 que tenga algo de ETH en Arbitrum, cambiar la red de tu wallet a Arbitrum y hacer clic en "Confirmar Transferencia" para volver a intentar la transacción. @@ -88,33 +88,33 @@ Si este es el caso, deberás conectarte utilizando una wallet de L2 que tenga al ## Paso 4: Finalizando la transferencia en L2 -En este punto, tu subgrafo y GRT se han recibido en Arbitrum, pero el subgrafo aún no se ha publicado. Deberás conectarte utilizando la wallet de L2 que elegiste como la wallet receptora, cambiar la red de tu wallet a Arbitrum y hacer clic en "Publicar Subgrafo". +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." 
-![Publicar el subgrafo](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Espera a que el subgrafo este publicado](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -Esto publicará el subgrafo para que los Indexadores que estén operando en Arbitrum puedan comenzar a servirlo. También se emitirá señal de curación utilizando los GRT que se transfirieron desde L1. +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## Paso 5: Actualizando la URL de consulta -¡Tu subgrafo se ha transferido correctamente a Arbitrum! Para realizar consultas al subgrafo, la nueva URL será: +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be : `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Ten en cuenta que el ID del subgrafo en Arbitrum será diferente al que tenías en mainnet, pero siempre podrás encontrarlo en Explorer o Studio. Como se mencionó anteriormente (ver "Comprensión de lo que sucede con la señal, tu subgrafo de L1 y las URL de consulta"), la antigua URL de L1 será compatible durante un corto período de tiempo, pero debes cambiar tus consultas a la nueva dirección tan pronto como el subgrafo se haya sincronizado en L2. +Note that the Subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## Cómo transferir tu curación a Arbitrum (L2) -## Comprensión de lo que sucede con la curación al transferir subgrafos a L2 +## Understanding what happens to curation on Subgraph transfers to L2 -Cuando el propietario de un subgrafo transfiere un subgrafo a Arbitrum, toda la señal del subgrafo se convierte en GRT al mismo tiempo. Esto se aplica a la señal "migrada automáticamente", es decir, la señal que no está vinculada a una versión o deploy específico del subgrafo, sino que sigue la última versión del subgrafo. +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -Esta conversión de señal a GRT es similar a lo que sucedería si el propietario del subgrafo deprecara el subgrafo en L1. Cuando el subgrafo se depreca o se transfiere, toda la señal de curación se "quema" simultáneamente (utilizando la bonding curve de curación) y el GRT resultante se mantiene en el contrato inteligente de GNS (que es el contrato que maneja las actualizaciones de subgrafos y la señal auto-migrada). Cada Curador en ese subgrafo, por lo tanto, tiene un reclamo sobre ese GRT proporcional a la cantidad de participaciones que tenían para el subgrafo. +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. 
When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph.

-Una fracción de estos GRT correspondientes al propietario del subgrafo se envía a L2 junto con el subgrafo.
+A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph.

-En este punto, el GRT curado ya no acumulará más tarifas de consulta, por lo que los Curadores pueden optar por retirar su GRT o transferirlo al mismo subgrafo en L2, donde se puede utilizar para generar nueva señal de curación. No hay prisa para hacerlo, ya que el GRT se puede mantener indefinidamente y todos reciben una cantidad proporcional a sus participaciones, independientemente de cuándo lo hagan.
+At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be held indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it.

## Elección de tu wallet en L2

@@ -130,9 +130,9 @@ Si estás utilizando una billetera de contrato inteligente, como una multisig (p

Antes de comenzar la transferencia, debes decidir qué dirección será la propietaria de la curación en L2 (ver "Elegir tu wallet en L2" arriba), y se recomienda tener algo de ETH para el gas ya bridgeado en Arbitrum en caso de que necesites volver a intentar la ejecución del mensaje en L2. Puedes comprar ETH en algunos exchanges y retirarlo directamente a Arbitrum, o puedes utilizar el puente de Arbitrum para enviar ETH desde una wallet en la red principal a L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - dado que las tarifas de gas en Arbitrum son muy bajas, es probable que solo necesites una pequeña cantidad, por ejemplo, 0.01 ETH será más que suficiente.

-Si un subgrafo al que has curado ha sido transferido a L2, verás un mensaje en Explorer que te indicará que estás curando hacia un subgrafo transferido.
+If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph.

-Cuando estás en la página del subgrafo, puedes elegir retirar o transferir la curación. Al hacer clic en "Transferir Señal a Arbitrum" se abrirá la herramienta de transferencia.
+When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool.

![Transferir señal](/img/transferSignalL2TransferTools.png)

@@ -162,4 +162,4 @@ Si este es el caso, deberás conectarte utilizando una wallet de L2 que tenga al

## Retirando tu curacion en L1

-Si prefieres no enviar tu GRT a L2, o prefieres bridgear GRT de forma manual, puedes retirar tu GRT curado en L1. En el banner en la página del subgrafo, elige "Retirar Señal" y confirma la transacción; el GRT se enviará a tu dirección de Curador.
+If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address.
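Once a transferred Subgraph has synced on L2, queries go to the Arbitrum gateway URL shown in the guide above. A minimal client-side sketch of such a query follows; the bracketed values are the same placeholders used in the guide, and `exampleEntities` is a hypothetical field that depends on your own Subgraph schema:

```ts
// Sketch only: query a transferred Subgraph through the Arbitrum gateway.
// Replace the bracketed placeholders with your own API key and L2 Subgraph ID;
// `exampleEntities` is a hypothetical field name, not part of any real schema.
const L2_QUERY_URL =
  "https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]";

async function queryL2Subgraph(): Promise<unknown> {
  const response = await fetch(L2_QUERY_URL, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      query: `{ exampleEntities(first: 5) { id } }`,
    }),
  });
  const { data, errors } = await response.json();
  if (errors) throw new Error(JSON.stringify(errors));
  return data;
}

queryL2Subgraph().then(console.log).catch(console.error);
```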
From 1b548907cac991a56286127f4c77b2ece859a182 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:46 -0500 Subject: [PATCH 0319/1789] New translations l2-transfer-tools-guide.mdx (Arabic) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/ar/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/ar/archived/arbitrum/l2-transfer-tools-guide.mdx index af5a133538d6..5863ff2de0a2 100644 --- a/website/src/pages/ar/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/ar/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ title: L2 Transfer Tools Guide Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## كيف تنقل الغراف الفرعي الخاص بك إلى شبكة آربترم (الطبقة الثانية) +## How to transfer your Subgraph to Arbitrum (L2) -## فوائد نقل الغراف الفرعي الخاصة بك +## Benefits of transferring your Subgraphs مجتمع الغراف والمطورون الأساسيون كانوا [يستعدون] (https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) للإنتقال إلى آربترم على مدى العام الماضي. وتعتبر آربترم سلسلة كتل من الطبقة الثانية أو "L2"، حيث ترث الأمان من سلسلة الإيثيريوم ولكنها توفر رسوم غازٍ أقل بشكلٍ كبير. -عندما تقوم بنشر أو ترقية الغرافات الفرعية الخاصة بك إلى شبكة الغراف، فأنت تتفاعل مع عقودٍ ذكيةٍ في البروتوكول وهذا يتطلب دفع رسوم الغاز باستخدام عملة الايثيريوم. من خلال نقل غرافاتك الفرعية إلى آربترم، فإن أي ترقيات مستقبلية لغرافك الفرعي ستتطلب رسوم غازٍ أقل بكثير. الرسوم الأقل، وكذلك حقيقة أن منحنيات الترابط التنسيقي على الطبقة الثانية مستقيمة، تجعل من الأسهل على المنسِّقين الآخرين تنسيق غرافك الفرعي، ممّا يزيد من مكافآت المفهرِسين على غرافك الفرعي. هذه البيئة ذات التكلفة-الأقل كذلك تجعل من الأرخص على المفهرسين أن يقوموا بفهرسة وخدمة غرافك الفرعي. سوف تزداد مكافآت الفهرسة على آربترم وتتناقص على شبكة إيثيريوم الرئيسية على مدى الأشهر المقبلة، لذلك سيقوم المزيد والمزيد من المُفَهرِسين بنقل ودائعهم المربوطة وتثبيت عملياتهم على الطبقة الثانية. +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. -## فهم ما يحدث مع الإشارة وغرافك الفرعي على الطبقة الأولى وعناوين مواقع الإستعلام +## Understanding what happens with signal, your L1 Subgraph and query URLs -عند نقل سبجراف إلى Arbitrum، يتم استخدام جسر Arbitrum GRT، الذي بدوره يستخدم جسر Arbitrum الأصلي، لإرسال السبجراف إلى L2. سيؤدي عملية "النقل" إلى إهمال السبجراف على شبكة الإيثيريوم الرئيسية وإرسال المعلومات لإعادة إنشاء السبجراف على L2 باستخدام الجسر. ستتضمن أيضًا رصيد GRT المرهون المرتبط بمالك السبجراف، والذي يجب أن يكون أكبر من الصفر حتى يقبل الجسر النقل. 
+Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -عندما تختار نقل الرسم البياني الفرعي ، سيؤدي ذلك إلى تحويل جميع إشارات التنسيق الخاصة بالرسم الفرعي إلى GRT. هذا يعادل "إهمال" الرسم البياني الفرعي على الشبكة الرئيسية. سيتم إرسال GRT المستخدمة لعملية التنسيق الخاصة بك إلى L2 جمباً إلى جمب مع الرسم البياني الفرعي ، حيث سيتم استخدامها لإنتاج الإشارة نيابة عنك. +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -يمكن للمنسقين الآخرين اختيار ما إذا كانوا سيسحبون جزء من GRT الخاص بهم ، أو نقله أيضًا إلى L2 لصك إشارة على نفس الرسم البياني الفرعي. إذا لم يقم مالك الرسم البياني الفرعي بنقل الرسم البياني الفرعي الخاص به إلى L2 وقام بإيقافه يدويًا عبر استدعاء العقد ، فسيتم إخطار المنسقين وسيتمكنون من سحب تنسيقهم. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -بمجرد نقل الرسم البياني الفرعي ، لن يتلقى المفهرسون بعد الآن مكافآت لفهرسة الرسم البياني الفرعي، نظرًا لأنه يتم تحويل كل التنسيق لـ GRT. ومع ذلك ، سيكون هناك مفهرسون 1) سيستمرون في خدمة الرسوم البيانية الفرعية المنقولة لمدة 24 ساعة ، و 2) سيبدأون فورًا في فهرسة الرسم البياني الفرعي على L2. ونظرًا لأن هؤلاء المفهرسون لديهم بالفعل رسم بياني فرعي مفهرس ، فلا داعي لانتظار مزامنة الرسم البياني الفرعي ، وسيكون من الممكن الاستعلام عن الرسم البياني الفرعي على L2 مباشرة تقريبًا. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. -يجب إجراء الاستعلامات على الرسم البياني الفرعي في L2 على عنوان URL مختلف (على \`` Arbitrum-gateway.thegraph.com`) ، لكن عنوان URL L1 سيستمر في العمل لمدة 48 ساعة على الأقل. بعد ذلك ، ستقوم بوابة L1 بإعادة توجيه الاستعلامات إلى بوابة L2 (لبعض الوقت) ، ولكن هذا سيضيف زمن تأخير لذلك يوصى تغيير جميع استعلاماتك إلى عنوان URL الجديد في أقرب وقت ممكن. +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## اختيار محفظة L2 الخاصة بك -عندما قمت بنشر subgraph الخاص بك على الشبكة الرئيسية ، فقد استخدمت محفظة متصلة لإنشاء subgraph ، وتمتلك هذه المحفظة NFT الذي يمثل هذا subgraph ويسمح لك بنشر التحديثات. 
+When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -عند نقل الرسم البياني الفرعي إلى Arbitrum ، يمكنك اختيار محفظة مختلفة والتي ستمتلك هذا الـ subgraph NFT على L2. +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. إذا كنت تستخدم محفظة "عادية" مثل MetaMask (حساب مملوك خارجيًا EOA ، محفظة ليست بعقد ذكي) ، فهذا اختياري ويوصى بالاحتفاظ بعنوان المالك نفسه كما في L1. -إذا كنت تستخدم محفظة بعقد ذكي ، مثل multisig (على سبيل المثال Safe) ، فإن اختيار عنوان مختلف لمحفظة L2 أمر إلزامي ، حيث من المرجح أن هذا الحساب موجود فقط على mainnet ولن تكون قادرًا على إجراء المعاملات على Arbitrum باستخدام هذه المحفظة. إذا كنت ترغب في الاستمرار في استخدام محفظة عقد ذكية أو multisig ، فقم بإنشاء محفظة جديدة على Arbitrum واستخدم عنوانها كمالك للرسم البياني الفرعي الخاص بك على L2. +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -** من المهم جدًا استخدام عنوان محفظة تتحكم فيه ، ويمكنه إجراء معاملات على Arbitrum. وإلا فسيتم فقد الرسم البياني الفرعي ولا يمكن استعادته. ** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## التحضير لعملية النقل: إنشاء جسر لـبعض ETH -يتضمن نقل الغراف الفرعي إرسال معاملة عبر الجسر ، ثم تنفيذ معاملة أخرى على شبكة أربترم. تستخدم المعاملة الأولى الإيثيريوم على الشبكة الرئيسية ، وتتضمن بعضًا من إيثيريوم لدفع ثمن الغاز عند استلام الرسالة على الطبقة الثانية. ومع ذلك ، إذا كان هذا الغاز غير كافٍ ، فسيتعين عليك إعادة إجراء المعاملة ودفع ثمن الغاز مباشرةً على الطبقة الثانية (هذه هي "الخطوة 3: تأكيد التحويل" أدناه). يجب تنفيذ هذه الخطوة ** في غضون 7 أيام من بدء التحويل **. علاوة على ذلك ، سيتم إجراء المعاملة الثانية مباشرة على شبكة أربترم ("الخطوة 4: إنهاء التحويل على الطبقة الثانية"). لهذه الأسباب ، ستحتاج بعضًا من إيثيريوم في محفظة أربترم. إذا كنت تستخدم متعدد التواقيع أو عقداً ذكياً ، فيجب أن يكون هناك بعضًا من إيثيريوم في المحفظة العادية (حساب مملوك خارجيا) التي تستخدمها لتنفيذ المعاملات ، وليس على محفظة متعددة التواقيع. +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. 
يمكنك شراء إيثيريوم من بعض المنصات وسحبها مباشرة إلى أربترم، أو يمكنك استخدام جسر أربترم لإرسال إيثيريوم من محفظة الشبكة الرئيسيةإلى الطبقة الثانية: [bridge.arbitrum.io] (http://bridge.arbitrum.io). نظرًا لأن رسوم الغاز على أربترم أقل ، فستحتاج فقط إلى مبلغ صغير. من المستحسن أن تبدأ بمبلغ منخفض (0 على سبيل المثال ، 01 ETH) للموافقة على معاملتك. -## العثور على أداة نقل الغراف الفرعي +## Finding the Subgraph Transfer Tool -يمكنك العثور على أداة نقل L2 في صفحة الرسم البياني الفرعي الخاص بك على Subgraph Studio: +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![أداة النقل](/img/L2-transfer-tool1.png) -إذا كنت متصلاً بالمحفظة التي تمتلك الغراف الفرعي، فيمكنك الوصول إليها عبر المستكشف، وذلك عن طريق الانتقال إلى صفحة الغراف الفرعي على المستكشف: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Transferring to L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ Some frequent questions about these tools are answered in the [L2 Transfer Tools ## الخطوة 1: بدء عملية النقل -قبل بدء عملية النقل، يجب أن تقرر أي عنوان سيكون مالكًا للغراف الفرعي على الطبقة الثانية (انظر "اختيار محفظة الطبقة الثانية" أعلاه)، ويُوصَى بشدة بأن يكون لديك بعضًا من الإيثيريوم لرسوم الغاز على أربترم. يمكنك الاطلاع على (التحضير لعملية النقل: تحويل بعضًا من إيثيريوم عبر الجسر." أعلاه). +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). -يرجى أيضًا ملاحظة أن نقل الرسم البياني الفرعي يتطلب وجود كمية غير صفرية من إشارة التنسيق عليه بنفس الحساب الذي يمتلك الرسم البياني الفرعي ؛ إذا لم تكن قد أشرت إلى الرسم البياني الفرعي ، فسيتعين عليك إضافة القليل من إشارة التنسيق (يكفي إضافة مبلغ صغير مثل 1 GRT). +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -بعد فتح أداة النقل، ستتمكن من إدخال عنوان المحفظة في الطبقة الثانية في حقل "عنوان محفظة الاستلام". تأكد من إدخال العنوان الصحيح هنا. بعد ذلك، انقر على "نقل الغراف الفرعي"، وسيتم طلب تنفيذ العملية في محفظتك. (يُرجى ملاحظة أنه يتم تضمين بعضًا من الإثيريوم لدفع رسوم الغاز في الطبقة الثانية). بعد تنفيذ العملية، سيتم بدء عملية النقل وإهمال الغراف الفرعي في الطبقة الأولى. (يمكنك الاطلاع على "فهم ما يحدث مع الإشارة والغراف الفرعي في الطبقة الأولى وعناوين الاستعلام" أعلاه لمزيد من التفاصيل حول ما يحدث خلف الكواليس). +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -إذا قمت بتنفيذ هذه الخطوة، \*\*يجب عليك التأكد من أنك ستستكمل الخطوة 3 في غضون 7 أيام، وإلا فإنك ستفقد الغراف الفرعي والإشارة GRT الخاصة بك. 
يرجع ذلك إلى آلية التواصل بين الطبقة الأولى والطبقة الثانية في أربترم: الرسائل التي ترسل عبر الجسر هي "تذاكر قابلة لإعادة المحاولة" يجب تنفيذها في غضون 7 أيام، وقد يتطلب التنفيذ الأولي إعادة المحاولة إذا كان هناك زيادة في سعر الغاز على أربترم. +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Start the transfer to L2](/img/startTransferL2.png) -## الخطوة 2: الانتظار حتى يتم نقل الغراف الفرعي إلى الطبقة الثانية +## Step 2: Waiting for the Subgraph to get to L2 -بعد بدء عملية النقل، يتعين على الرسالة التي ترسل الـ subgraph من L1 إلى L2 أن يتم نشرها عبر جسر Arbitrum. يستغرق ذلك حوالي 20 دقيقة (ينتظر الجسر لكتلة الشبكة الرئيسية التي تحتوي على المعاملة حتى يتأكد أنها "آمنة" من إمكانية إعادة ترتيب السلسلة). +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). بمجرد انتهاء وقت الانتظار ، ستحاول Arbitrum تنفيذ النقل تلقائيًا على عقود L2. @@ -80,7 +80,7 @@ Some frequent questions about these tools are answered in the [L2 Transfer Tools ## الخطوة الثالثة: تأكيد التحويل -في معظم الحالات ، سيتم تنفيذ هذه الخطوة تلقائيًا لأن غاز الطبقة الثانية المضمن في الخطوة 1 يجب أن يكون كافيًا لتنفيذ المعاملة التي تتلقى الغراف الفرعي في عقود أربترم. ومع ذلك ، في بعض الحالات ، من الممكن أن يؤدي ارتفاع أسعار الغاز على أربترم إلى فشل هذا التنفيذ التلقائي. وفي هذه الحالة ، ستكون "التذكرة" التي ترسل غرافك الفرعي إلى الطبقة الثانية معلقة وتتطلب إعادة المحاولة في غضون 7 أيام. +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. في هذا الحالة ، فستحتاج إلى الاتصال باستخدام محفظة الطبقة الثانية والتي تحتوي بعضاً من إيثيريوم على أربترم، قم بتغيير شبكة محفظتك إلى أربترم، والنقر فوق "تأكيد النقل" لإعادة محاولة المعاملة. @@ -88,33 +88,33 @@ Some frequent questions about these tools are answered in the [L2 Transfer Tools ## الخطوة 4: إنهاء عملية النقل على L2 -في هذه المرحلة، تم استلام الغراف الفرعي والـ GRT الخاص بك على أربترم، ولكن الغراف الفرعي لم يتم نشره بعد. ستحتاج إلى الربط باستخدام محفظة الطبقة الثانية التي اخترتها كمحفظة استلام، وتغيير شبكة محفظتك إلى أربترم، ثم النقر على "نشر الغراف الفرعي" +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." 
-![نشر الغراف الفرعي](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![انتظر حتى يتم نشر الغراف الفرعي](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -سيؤدي هذا إلى نشر الغراف الفرعي حتى يتمكن المفهرسون الذين يعملون في أربترم بالبدء في تقديم الخدمة. كما أنه سيعمل أيضًا على إصدار إشارة التنسيق باستخدام GRT التي تم نقلها من الطبقة الأولى. +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## Step 5: Updating the query URL -تم نقل غرافك الفرعي بنجاح إلى أربترم! للاستعلام عن الغراف الفرعي ، سيكون عنوان URL الجديد هو: +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be : `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -لاحظ أن ID الغراف الفرعي على أربترم سيكون مختلفًا عن الذي لديك في الشبكة الرئيسية، ولكن يمكنك العثور عليه في المستكشف أو استوديو. كما هو مذكور أعلاه (راجع "فهم ما يحدث للإشارة والغراف الفرعي في الطبقة الأولى وعناوين الاستعلام") سيتم دعم عنوان URL الطبقة الأولى القديم لفترة قصيرة ، ولكن يجب عليك تبديل استعلاماتك إلى العنوان الجديد بمجرد مزامنة الغراف الفرعي على الطبقة الثانية. +Note that the Subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## كيفية نقل التنسيق الخاص بك إلى أربترم (الطبقة الثانية) -## Understanding what happens to curation on subgraph transfers to L2 +## Understanding what happens to curation on Subgraph transfers to L2 -When the owner of a subgraph transfers a subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph. +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). 
Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph.

-A fraction of these GRT corresponding to the subgraph owner is sent to L2 together with the subgraph.
+A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph.

-At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it.
+At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be held indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it.

## اختيار محفظة L2 الخاصة بك

@@ -130,9 +130,9 @@ If you're using a smart contract wallet, like a multisig (e.g. a Safe), then cho

Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough.

-If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph.
+If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph.

-When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool.
+When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool.

![Transfer signal](/img/transferSignalL2TransferTools.png)

@@ -162,4 +162,4 @@ In most cases, this step will auto-execute as the L2 gas included in step 1 shou

## Withdrawing your curation on L1

-If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address.
+If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address.
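The curation sections above repeat the same rule in each language: when a Subgraph is transferred or deprecated, the burned signal becomes GRT held by the GNS contract, and each Curator can claim an amount proportional to the shares they held. The sketch below only illustrates that proportion; it is not the GNS contract logic, and the amounts are made-up example values:

```ts
// Illustrative only: how a proportional curation claim works in principle.
// `totalGrtFromBurn` stands for the GRT held by GNS after the signal is burned;
// the shares and amounts below are invented example values, not on-chain data.
const GRT_DECIMALS = 10n ** 18n;

function curatorClaim(
  totalGrtFromBurn: bigint, // GRT (in wei) held for this Subgraph after the burn
  curatorShares: bigint,    // curation shares owned by this Curator
  totalShares: bigint       // total curation shares that existed for the Subgraph
): bigint {
  return (totalGrtFromBurn * curatorShares) / totalShares;
}

// Example: 10,000 GRT to split, Curator owns 250 of 1,000 shares -> 2,500 GRT.
const claim = curatorClaim(10_000n * GRT_DECIMALS, 250n, 1_000n);
console.log(`Claimable: ${claim / GRT_DECIMALS} GRT`);
```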
From 39485897e79823865d4fc36235291ea1de9c5957 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:47 -0500 Subject: [PATCH 0320/1789] New translations l2-transfer-tools-guide.mdx (Czech) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/cs/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/cs/archived/arbitrum/l2-transfer-tools-guide.mdx index 69717e46ed39..94b78981db6b 100644 --- a/website/src/pages/cs/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/cs/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ Graph usnadnil přechod na úroveň L2 v Arbitrum One. Pro každého účastník Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## Jak přenést podgraf do Arbitrum (L2) +## How to transfer your Subgraph to Arbitrum (L2) -## Výhody přenosu podgrafů +## Benefits of transferring your Subgraphs Komunita a hlavní vývojáři Graphu se v uplynulém roce [připravovali](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) na přechod na Arbitrum. Arbitrum, blockchain druhé vrstvy neboli "L2", zdědil bezpečnost po Ethereum, ale poskytuje výrazně nižší poplatky za plyn. -Když publikujete nebo aktualizujete svůj subgraf v síti The Graph Network, komunikujete s chytrými smlouvami na protokolu, což vyžaduje platbu za plyn pomocí ETH. Přesunutím subgrafů do Arbitrum budou veškeré budoucí aktualizace subgrafů vyžadovat mnohem nižší poplatky za plyn. Nižší poplatky a skutečnost, že křivky vazby kurátorů na L2 jsou ploché, také usnadňují ostatním kurátorům kurátorství na vašem podgrafu, což zvyšuje odměny pro indexátory na vašem podgrafu. Toto prostředí s nižšími náklady také zlevňuje indexování a obsluhu subgrafu pro indexátory. Odměny za indexování se budou v následujících měsících na Arbitrum zvyšovat a na mainnetu Ethereum snižovat, takže stále více indexerů bude převádět své podíly a zakládat své operace na L2. +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. -## Porozumění tomu, co se děje se signálem, podgrafem L1 a adresami URL dotazů +## Understanding what happens with signal, your L1 Subgraph and query URLs -Při přenosu podgrafu do Arbitrum se používá můstek Arbitrum GRT, který zase používá nativní můstek Arbitrum k odeslání podgrafu do L2. Při "přenosu" se subgraf v mainnetu znehodnotí a odešlou se informace pro opětovné vytvoření subgrafu v L2 pomocí mostu. Zahrnuje také GRT vlastníka podgrafu, který již byl signalizován a který musí být větší než nula, aby most převod přijal. 
+Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -Pokud zvolíte převod podgrafu, převede se veškerý signál kurátoru podgrafu na GRT. To je ekvivalentní "znehodnocení" podgrafu v síti mainnet. GRT odpovídající vašemu kurátorství budou spolu s podgrafem odeslány na L2, kde budou vaším jménem použity k ražbě signálu. +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -Ostatní kurátoři se mohou rozhodnout, zda si stáhnou svůj podíl GRT, nebo jej také převedou na L2, aby na stejném podgrafu vyrazili signál. Pokud vlastník podgrafu nepřevede svůj podgraf na L2 a ručně jej znehodnotí prostřednictvím volání smlouvy, pak budou Kurátoři upozorněni a budou moci stáhnout svou kurátorskou funkci. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -Jakmile je podgraf převeden, protože veškerá kurátorská činnost je převedena na GRT, indexátoři již nebudou dostávat odměny za indexování podgrafu. Budou však existovat indexátory, které 1) budou obsluhovat převedené podgrafy po dobu 24 hodin a 2) okamžitě začnou indexovat podgraf na L2. Protože tyto Indexery již mají podgraf zaindexovaný, nemělo by být nutné čekat na synchronizaci podgrafu a bude možné se na podgraf na L2 dotazovat téměř okamžitě. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. -Dotazy do podgrafu L2 bude nutné zadávat na jinou adresu URL (na `arbitrum-gateway.thegraph.com`), ale adresa URL L1 bude fungovat nejméně 48 hodin. Poté bude brána L1 přeposílat dotazy na bránu L2 (po určitou dobu), což však zvýší latenci, takže se doporučuje co nejdříve přepnout všechny dotazy na novou adresu URL. +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## Výběr peněženky L2 -Když jste publikovali svůj podgraf na hlavní síti (mainnet), použili jste připojenou peněženku, která vlastní NFT reprezentující tento podgraf a umožňuje vám publikovat aktualizace. 
+When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -Při přenosu podgrafu do Arbitrum si můžete vybrat jinou peněženku, která bude vlastnit tento podgraf NFT na L2. +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. Pokud používáte "obyčejnou" peněženku, jako je MetaMask (externě vlastněný účet nebo EOA, tj. peněženka, která není chytrým kontraktem), pak je to volitelné a doporučuje se zachovat stejnou adresu vlastníka jako v L1. -Pokud používáte peněženku s chytrým kontraktem, jako je multisig (např. Trezor), pak je nutné zvolit jinou adresu peněženky L2, protože je pravděpodobné, že tento účet existuje pouze v mainnetu a nebudete moci provádět transakce na Arbitrum pomocí této peněženky. Pokud chcete i nadále používat peněženku s chytrým kontraktem nebo multisig, vytvořte si na Arbitrum novou peněženku a její adresu použijte jako vlastníka L2 svého subgrafu. +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**Je velmi důležité používat adresu peněženky, kterou máte pod kontrolou a která může provádět transakce na Arbitrum. V opačném případě bude podgraf ztracen a nebude možné jej obnovit.** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## Příprava na převod: přemostění některých ETH -Přenos podgrafu zahrnuje odeslání transakce přes můstek a následné provedení další transakce na Arbitrum. První transakce využívá ETH na mainnetu a obsahuje nějaké ETH na zaplacení plynu, když je zpráva přijata na L2. Pokud však tento plyn nestačí, je třeba transakci zopakovat a zaplatit za plyn přímo na L2 (to je 'Krok 3: Potvrzení převodu' níže). Tento krok musí být proveden do 7 dnů od zahájení převodu\*\*. Druhá transakce ('Krok 4: Dokončení převodu na L2') bude navíc provedena přímo na Arbitrum. Z těchto důvodů budete potřebovat nějaké ETH na peněžence Arbitrum. Pokud používáte multisig nebo smart contract účet, ETH bude muset být v běžné peněžence (EOA), kterou používáte k provádění transakcí, nikoli na samotné multisig peněžence. +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. 
ETH si můžete koupit na některých burzách a vybrat přímo na Arbitrum, nebo můžete použít most Arbitrum a poslat ETH z peněženky mainnetu na L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Vzhledem k tomu, že poplatky za plyn na Arbitrum jsou nižší, mělo by vám stačit jen malé množství. Doporučujeme začít na nízkém prahu (např. 0.01 ETH), aby byla vaše transakce schválena. -## Hledání nástroje pro přenos podgrafu +## Finding the Subgraph Transfer Tool -Nástroj pro přenos L2 najdete při prohlížení stránky svého podgrafu v aplikaci Subgraph Studio: +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![transfer tool](/img/L2-transfer-tool1.png) -Je k dispozici také v Průzkumníku, pokud jste připojeni k peněžence, která vlastní podgraf, a na stránce tohoto podgrafu v Průzkumníku: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Transferring to L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ Kliknutím na tlačítko Přenést na L2 otevřete nástroj pro přenos, kde mů ## Krok 1: Zahájení přenosu -Před zahájením převodu se musíte rozhodnout, která adresa bude vlastnit podgraf na L2 (viz výše "Výběr peněženky L2"), a důrazně doporučujeme mít na Arbitrum již přemostěné ETH pro plyn (viz výše "Příprava na převod: přemostění některých ETH"). +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). -Vezměte prosím na vědomí, že přenos podgrafu vyžaduje nenulové množství signálu na podgrafu se stejným účtem, který vlastní podgraf; pokud jste na podgrafu nesignalizovali, budete muset přidat trochu kurátorství (stačí přidat malé množství, například 1 GRT). +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -Po otevření nástroje Transfer Tool budete moci do pole "Receiving wallet address" zadat adresu peněženky L2 - **ujistěte se, že jste zadali správnou adresu**. Kliknutím na Transfer Subgraph budete vyzváni k provedení transakce na vaší peněžence (všimněte si, že je zahrnuta určitá hodnota ETH, abyste zaplatili za plyn L2); tím se zahájí přenos a znehodnotí váš subgraf L1 (více podrobností o tom, co se děje v zákulisí, najdete výše v části "Porozumění tomu, co se děje se signálem, vaším subgrafem L1 a URL dotazů"). +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -Pokud tento krok provedete, ujistěte se, že jste pokračovali až do dokončení kroku 3 za méně než 7 dní, jinak se podgraf a váš signál GRT ztratí. 
To je způsobeno tím, jak funguje zasílání zpráv L1-L2 na Arbitrum: zprávy, které jsou zasílány přes most, jsou "Opakovatelný tiket", které musí být provedeny do 7 dní, a počáteční provedení může vyžadovat opakování, pokud dojde ke skokům v ceně plynu na Arbitrum. +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Start the transfer to L2](/img/startTransferL2.png) -## Krok 2: Čekání, až se podgraf dostane do L2 +## Step 2: Waiting for the Subgraph to get to L2 -Po zahájení přenosu se musí zpráva, která odesílá podgraf L1 do L2, šířit přes můstek Arbitrum. To trvá přibližně 20 minut (můstek čeká, až bude blok mainnetu obsahující transakci "bezpečný" před případnými reorgy řetězce). +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). Po uplynutí této čekací doby se Arbitrum pokusí o automatické provedení přenosu na základě smluv L2. @@ -80,7 +80,7 @@ Po uplynutí této čekací doby se Arbitrum pokusí o automatické provedení p ## Krok 3: Potvrzení převodu -Ve většině případů se tento krok provede automaticky, protože plyn L2 obsažený v kroku 1 by měl stačit k provedení transakce, která přijímá podgraf na smlouvách Arbitrum. V některých případech je však možné, že prudký nárůst cen plynu na Arbitrum způsobí selhání tohoto automatického provedení. V takovém případě bude "ticket", který odešle subgraf na L2, čekat na vyřízení a bude vyžadovat opakování pokusu do 7 dnů. +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. V takovém případě se musíte připojit pomocí peněženky L2, která má nějaké ETH na Arbitrum, přepnout síť peněženky na Arbitrum a kliknutím na "Confirm Transfer" zopakovat transakci. @@ -88,33 +88,33 @@ V takovém případě se musíte připojit pomocí peněženky L2, která má n ## Krok 4: Dokončení přenosu na L2 -V tuto chvíli byly váš podgraf a GRT přijaty na Arbitrum, ale podgraf ještě není zveřejněn. Budete se muset připojit pomocí peněženky L2, kterou jste si vybrali jako přijímající peněženku, přepnout síť peněženky na Arbitrum a kliknout na "Publikovat subgraf" +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." 
-![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -Tím se podgraf zveřejní, aby jej mohly začít obsluhovat indexery pracující na Arbitrum. Rovněž bude zminován kurátorský signál pomocí GRT, které byly přeneseny z L1. +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## Krok 5: Aktualizace URL dotazu -Váš podgraf byl úspěšně přenesen do Arbitrum! Chcete-li se na podgraf zeptat, nová URL bude: +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be: `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Všimněte si, že ID podgrafu v Arbitrum bude jiné než to, které jste měli v mainnetu, ale vždy ho můžete najít v Průzkumníku nebo Studiu. Jak je uvedeno výše (viz "Pochopení toho, co se děje se signálem, vaším subgrafem L1 a URL dotazů"), stará URL adresa L1 bude po krátkou dobu podporována, ale jakmile bude subgraf synchronizován na L2, měli byste své dotazy přepnout na novou adresu. +Note that the Subgraph ID on Arbitrum will be different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs"), the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## Jak přenést kurátorství do služby Arbitrum (L2) -## Porozumění tomu, co se děje s kurátorstvím při přenosu podgrafů do L2 +## Understanding what happens to curation on Subgraph transfers to L2 -Když vlastník podgrafu převede podgraf do Arbitrum, je veškerý signál podgrafu současně převeden na GRT. To se týká "automaticky migrovaného" signálu, tj. signálu, který není specifický pro verzi podgrafu nebo nasazení, ale který následuje nejnovější verzi podgrafu. +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -Tento převod ze signálu na GRT je stejný, jako kdyby vlastník podgrafu zrušil podgraf v L1. Při depreciaci nebo převodu subgrafu se současně "spálí" veškerý kurátorský signál (pomocí kurátorské vazební křivky) a výsledný GRT je držen inteligentním kontraktem GNS (tedy kontraktem, který se stará o upgrade subgrafu a automatickou migraci signálu). Každý kurátor na tomto subgrafu má tedy nárok na tento GRT úměrný množství podílů, které měl na subgrafu. +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph.
-Část těchto GRT odpovídající vlastníkovi podgrafu je odeslána do L2 spolu s podgrafem. +A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph. -V tomto okamžiku se za kurátorský GRT již nebudou účtovat žádné poplatky za dotazování, takže kurátoři se mohou rozhodnout, zda svůj GRT stáhnou, nebo jej přenesou do stejného podgrafu na L2, kde může být použit k ražbě nového kurátorského signálu. S tímto úkonem není třeba spěchat, protože GRT lze pomáhat donekonečna a každý dostane částku úměrnou svému podílu bez ohledu na to, kdy tak učiní. +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be held indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. ## Výběr peněženky L2 @@ -130,9 +130,9 @@ Pokud používáte peněženku s chytrým kontraktem, jako je multisig (např. T Před zahájením převodu se musíte rozhodnout, která adresa bude vlastnit kurátorství na L2 (viz výše "Výběr peněženky L2"), a doporučujeme mít nějaké ETH pro plyn již přemostěné na Arbitrum pro případ, že byste potřebovali zopakovat provedení zprávy na L2. ETH můžete nakoupit na některých burzách a vybrat si ho přímo na Arbitrum, nebo můžete použít Arbitrum bridge pro odeslání ETH z peněženky mainnetu na L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - protože poplatky za plyn na Arbitrum jsou tak nízké, mělo by vám stačit jen malé množství, např. 0,01 ETH bude pravděpodobně více než dostačující. -Pokud byl podgraf, do kterého kurátor provádí kurátorství, převeden do L2, zobrazí se v Průzkumníku zpráva, že kurátorství provádíte do převedeného podgrafu. +If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph. -Při pohledu na stránku podgrafu můžete zvolit stažení nebo přenos kurátorství. Kliknutím na "Přenést signál do Arbitrum" otevřete nástroj pro přenos. +When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. ![Transfer signal](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ V takovém případě se musíte připojit pomocí peněženky L2, která má n ## Odstranění vašeho kurátorství na L1 -Pokud nechcete posílat GRT na L2 nebo byste raději překlenuli GRT ručně, můžete si na L1 stáhnout svůj kurátorovaný GRT. Na banneru na stránce podgrafu zvolte "Withdraw Signal" a potvrďte transakci; GRT bude odeslán na vaši adresu kurátora. +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address.
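The guide text introduced by this patch points queries at the new L2 gateway URL (`https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]`). As a rough sketch only, assuming a Node 18+ or browser environment with global `fetch`, switching a client to a transferred Subgraph might look like the snippet below; the API key, Subgraph ID, and the `_meta` example query are placeholders for illustration and are not values taken from these patches.

```typescript
// Minimal sketch of querying a transferred Subgraph through the Arbitrum (L2) gateway.
// The API key, Subgraph ID, and example query are placeholders, not values from these patches.
const L2_GATEWAY_BASE = "https://arbitrum-gateway.thegraph.com";
const API_KEY = "YOUR_API_KEY"; // hypothetical
const L2_SUBGRAPH_ID = "YOUR_L2_SUBGRAPH_ID"; // hypothetical; differs from the old L1 ID

async function queryL2Subgraph<T>(query: string): Promise<T> {
  const url = `${L2_GATEWAY_BASE}/api/${API_KEY}/subgraphs/id/${L2_SUBGRAPH_ID}`;
  const response = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query }),
  });
  if (!response.ok) {
    throw new Error(`Gateway request failed with status ${response.status}`);
  }
  const payload = (await response.json()) as { data?: T; errors?: unknown };
  if (payload.errors) {
    throw new Error(`GraphQL errors: ${JSON.stringify(payload.errors)}`);
  }
  return payload.data as T;
}

// Example usage: `_meta` is a standard graph-node field, handy for checking sync progress on L2.
queryL2Subgraph<{ _meta: { block: { number: number } } }>("{ _meta { block { number } } }")
  .then((data) => console.log("L2 Subgraph is at block", data._meta.block.number))
  .catch(console.error);
```

The same request shape works against the old L1 gateway URL during the grace period described in the guide, so a client can flip a single base-URL constant once the Subgraph has synced on L2.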
From 2a18dcec285ddbcdd266a62ec3b3581ebf1b8aa0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:48 -0500 Subject: [PATCH 0321/1789] New translations l2-transfer-tools-guide.mdx (German) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/de/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/de/archived/arbitrum/l2-transfer-tools-guide.mdx index 6a5b13da53d7..8a3b3c6f3f1d 100644 --- a/website/src/pages/de/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/de/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ The Graph hat den Wechsel zu L2 auf Arbitrum One leicht gemacht. Für jeden Prot Einige häufig gestellte Fragen zu diesen Tools werden in den [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/) beantwortet. Die FAQs enthalten ausführliche Erklärungen zur Verwendung der Tools, zu ihrer Funktionsweise und zu den Dingen, die bei ihrer Verwendung zu beachten sind. -## So übertragen Sie Ihren Subgraphen auf Arbitrum (L2) +## How to transfer your Subgraph to Arbitrum (L2) -## Vorteile der Übertragung Ihrer Untergraphen +## Benefits of transferring your Subgraphs The Graph's Community und die Kernentwickler haben im letzten Jahr den Wechsel zu Arbitrum [vorbereitet] (https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305). Arbitrum, eine Layer-2- oder "L2"-Blockchain, erbt die Sicherheit von Ethereum, bietet aber drastisch niedrigere Gasgebühren. -Wenn Sie Ihren Subgraphen auf The Graph Network veröffentlichen oder aktualisieren, interagieren Sie mit intelligenten Verträgen auf dem Protokoll, und dies erfordert die Bezahlung von Gas mit ETH. Indem Sie Ihre Subgraphen zu Arbitrum verschieben, werden alle zukünftigen Aktualisierungen Ihres Subgraphen viel niedrigere Gasgebühren erfordern. Die niedrigeren Gebühren und die Tatsache, dass die Kurationsbindungskurven auf L2 flach sind, machen es auch für andere Kuratoren einfacher, auf Ihrem Subgraphen zu kuratieren, was die Belohnungen für Indexer auf Ihrem Subgraphen erhöht. Diese kostengünstigere Umgebung macht es auch für Indexer preiswerter, Ihren Subgraphen zu indizieren und zu bedienen. Die Belohnungen für die Indexierung werden in den kommenden Monaten auf Arbitrum steigen und auf dem Ethereum-Mainnet sinken, so dass immer mehr Indexer ihren Einsatz transferieren und ihre Operationen auf L2 einrichten werden. +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. 
-## Verstehen, was mit dem Signal, Ihrem L1-Subgraphen und den Abfrage-URLs geschieht +## Understanding what happens with signal, your L1 Subgraph and query URLs -Die Übertragung eines Subgraphen nach Arbitrum verwendet die Arbitrum GRT-Brücke, die wiederum die native Arbitrum-Brücke verwendet, um den Subgraphen nach L2 zu senden. Der "Transfer" löscht den Subgraphen im Mainnet und sendet die Informationen, um den Subgraphen auf L2 mit Hilfe der Brücke neu zu erstellen. Sie enthält auch die vom Eigentümer des Subgraphen signalisierte GRT, die größer als Null sein muss, damit die Brücke die Übertragung akzeptiert. +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -Wenn Sie sich für die Übertragung des Untergraphen entscheiden, wird das gesamte Kurationssignal des Untergraphen in GRT umgewandelt. Dies ist gleichbedeutend mit dem "Verwerfen" des Subgraphen im Mainnet. Die GRT, die Ihrer Kuration entsprechen, werden zusammen mit dem Subgraphen an L2 gesendet, wo sie für die Prägung von Signalen in Ihrem Namen verwendet werden. +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -Andere Kuratoren können wählen, ob sie ihren Anteil an GRT zurückziehen oder ihn ebenfalls an L2 übertragen, um das Signal auf demselben Untergraphen zu prägen. Wenn ein Subgraph-Eigentümer seinen Subgraph nicht an L2 überträgt und ihn manuell über einen Vertragsaufruf abmeldet, werden die Kuratoren benachrichtigt und können ihre Kuration zurückziehen. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -Sobald der Subgraph übertragen wurde, erhalten die Indexer keine Belohnungen mehr für die Indizierung des Subgraphen, da die gesamte Kuration in GRT umgewandelt wird. Es wird jedoch Indexer geben, die 1) übertragene Untergraphen für 24 Stunden weiter bedienen und 2) sofort mit der Indizierung des Untergraphen auf L2 beginnen. Da diese Indexer den Untergraphen bereits indiziert haben, sollte es nicht nötig sein, auf die Synchronisierung des Untergraphen zu warten, und es wird möglich sein, den L2-Untergraphen fast sofort abzufragen. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. 
-Anfragen an den L2-Subgraphen müssen an eine andere URL gerichtet werden (an `arbitrum-gateway.thegraph.com`), aber die L1-URL wird noch mindestens 48 Stunden lang funktionieren. Danach wird das L1-Gateway (für eine gewisse Zeit) Anfragen an das L2-Gateway weiterleiten, was jedoch zu zusätzlichen Latenzzeiten führt. Es wird daher empfohlen, alle Anfragen so bald wie möglich auf die neue URL umzustellen. +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## Ein Teil dieser GRT, der dem Inhaber des Untergraphen entspricht, wird zusammen mit dem Untergraphen an L2 gesendet. -Als Sie Ihren Subgraphen im Mainnet veröffentlicht haben, haben Sie eine angeschlossene Wallet benutzt, um den Subgraphen zu erstellen, und diese Wallet besitzt die NFT, die diesen Subgraphen repräsentiert und Ihnen erlaubt, Updates zu veröffentlichen. +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -Wenn man den Subgraphen zu Arbitrum überträgt, kann man eine andere Wallet wählen, die diesen Subgraphen NFT auf L2 besitzen wird. +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. Wenn Sie eine "normale" Wallet wie MetaMask verwenden (ein Externally Owned Account oder EOA, d.h. eine Wallet, die kein Smart Contract ist), dann ist dies optional und es wird empfohlen, die gleiche Eigentümeradresse wie in L1 beizubehalten. -Wenn Sie eine Smart-Contract-Wallet, wie z.B. eine Multisig (z.B. Safe), verwenden, dann ist die Wahl einer anderen L2-Wallet-Adresse zwingend erforderlich, da es sehr wahrscheinlich ist, dass dieses Konto nur im Mainnet existiert und Sie mit dieser Wallet keine Transaktionen auf Arbitrum durchführen können. Wenn Sie weiterhin eine Smart Contract Wallet oder Multisig verwenden möchten, erstellen Sie eine neue Wallet auf Arbitrum und verwenden Sie deren Adresse als L2-Besitzer Ihres Subgraphen. +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**Es ist sehr wichtig, eine Wallet-Adresse zu verwenden, die Sie kontrollieren und die Transaktionen auf Arbitrum durchführen kann. Andernfalls geht der Subgraph verloren und kann nicht wiederhergestellt werden.** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## Vorbereitung der Übertragung: Überbrückung einiger ETH -Die Übertragung des Subgraphen beinhaltet das Senden einer Transaktion über die Brücke und das Ausführen einer weiteren Transaktion auf Arbitrum. Die erste Transaktion verwendet ETH im Mainnet und enthält einige ETH, um das Gas zu bezahlen, wenn die Nachricht auf L2 empfangen wird. 
Wenn dieses Gas jedoch nicht ausreicht, müssen Sie die Transaktion wiederholen und das Gas direkt auf L2 bezahlen (dies ist "Schritt 3: Bestätigen des Transfers" unten). Dieser Schritt **muss innerhalb von 7 Tagen nach Beginn der Überweisung** ausgeführt werden. Außerdem wird die zweite Transaktion ("Schritt 4: Beenden der Übertragung auf L2") direkt auf Arbitrum durchgeführt. Aus diesen Gründen benötigen Sie etwas ETH auf einer Arbitrum-Wallet. Wenn Sie ein Multisig- oder Smart-Contract-Konto verwenden, muss sich die ETH in der regulären (EOA-) Wallet befinden, die Sie zum Ausführen der Transaktionen verwenden, nicht in der Multisig-Wallet selbst. +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. Sie können ETH auf einigen Börsen kaufen und direkt auf Arbitrum abheben, oder Sie können die Arbitrum-Brücke verwenden, um ETH von einer Mainnet-Wallet zu L2 zu senden: [bridge.arbitrum.io](http://bridge.arbitrum.io). Da die Gasgebühren auf Arbitrum niedriger sind, sollten Sie nur eine kleine Menge benötigen. Es wird empfohlen, mit einem niedrigen Schwellenwert (z.B. 0,01 ETH) zu beginnen, damit Ihre Transaktion genehmigt wird. -## Suche nach dem Untergraphen Transfer Tool +## Finding the Subgraph Transfer Tool -Sie finden das L2 Transfer Tool, wenn Sie die Seite Ihres Subgraphen in Subgraph Studio ansehen: +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![transfer tool](/img/L2-transfer-tool1.png) -Sie ist auch im Explorer verfügbar, wenn Sie mit der Wallet verbunden sind, die einen Untergraphen besitzt, und auf der Seite dieses Untergraphen im Explorer: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Transferring to L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ Wenn Sie auf die Schaltfläche auf L2 übertragen klicken, wird das Übertragung ## Schritt 1: Starten der Übertragung -Bevor Sie mit dem Transfer beginnen, müssen Sie entscheiden, welche Adresse den Subgraphen auf L2 besitzen wird (siehe "Wählen Sie Ihre L2-Wallet" oben), und es wird dringend empfohlen, einige ETH für Gas bereits auf Arbitrum zu überbrücken (siehe "Vorbereitung des Transfers: Überbrücken einiger ETH" oben). +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). 
-Bitte beachten Sie auch, dass die Übertragung des Untergraphen ein Signal ungleich Null auf dem Untergraphen mit demselben Konto erfordert, das den Untergraphen besitzt; wenn Sie kein Signal auf dem Untergraphen haben, müssen Sie ein wenig Kuration hinzufügen (das Hinzufügen eines kleinen Betrags wie 1 GRT würde ausreichen). +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -Nachdem Sie das Transfer-Tool geöffnet haben, können Sie die L2-Wallet-Adresse in das Feld "Empfänger-Wallet-Adresse" eingeben - **vergewissern Sie sich, dass Sie hier die richtige Adresse eingegeben haben**. Wenn Sie auf "Transfer Subgraph" klicken, werden Sie aufgefordert, die Transaktion auf Ihrer Wallet auszuführen (beachten Sie, dass ein gewisser ETH-Wert enthalten ist, um das L2-Gas zu bezahlen); dadurch wird der Transfer eingeleitet und Ihr L1-Subgraph außer Kraft gesetzt (siehe "Verstehen, was mit Signal, Ihrem L1-Subgraph und Abfrage-URLs passiert" weiter oben für weitere Details darüber, was hinter den Kulissen passiert). +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -Wenn Sie diesen Schritt ausführen, **vergewissern Sie sich, dass Sie bis zum Abschluss von Schritt 3 in weniger als 7 Tagen fortfahren, sonst gehen der Subgraph und Ihr Signal GRT verloren.** Dies liegt daran, wie L1-L2-Nachrichten auf Arbitrum funktionieren: Nachrichten, die über die Brücke gesendet werden, sind "wiederholbare Tickets", die innerhalb von 7 Tagen ausgeführt werden müssen, und die erste Ausführung muss möglicherweise wiederholt werden, wenn es Spitzen im Gaspreis auf Arbitrum gibt. +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Start the transfer to L2](/img/startTransferL2.png) -## Schritt 2: Warten, bis der Untergraph L2 erreicht hat +## Step 2: Waiting for the Subgraph to get to L2 -Nachdem Sie die Übertragung gestartet haben, muss die Nachricht, die Ihren L1-Subgraphen an L2 sendet, die Arbitrum-Brücke durchlaufen. Dies dauert etwa 20 Minuten (die Brücke wartet darauf, dass der Mainnet-Block, der die Transaktion enthält, vor potenziellen Reorgs der Kette "sicher" ist). +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). Sobald diese Wartezeit abgelaufen ist, versucht Arbitrum, die Übertragung auf den L2-Verträgen automatisch auszuführen. 
@@ -80,7 +80,7 @@ Sobald diese Wartezeit abgelaufen ist, versucht Arbitrum, die Übertragung auf d ## Schritt 3: Bestätigung der Übertragung -In den meisten Fällen wird dieser Schritt automatisch ausgeführt, da das in Schritt 1 enthaltene L2-Gas ausreichen sollte, um die Transaktion auszuführen, die den Untergraphen auf den Arbitrum-Verträgen erhält. In einigen Fällen ist es jedoch möglich, dass ein Anstieg der Gaspreise auf Arbitrum dazu führt, dass diese automatische Ausführung fehlschlägt. In diesem Fall wird das "Ticket", das Ihren Subgraphen an L2 sendet, ausstehend sein und einen erneuten Versuch innerhalb von 7 Tagen erfordern. +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. Wenn dies der Fall ist, müssen Sie sich mit einer L2-Wallet verbinden, die etwas ETH auf Arbitrum hat, Ihr Wallet-Netzwerk auf Arbitrum umstellen und auf "Confirm Transfer" klicken, um die Transaktion zu wiederholen. @@ -88,33 +88,33 @@ Wenn dies der Fall ist, müssen Sie sich mit einer L2-Wallet verbinden, die etwa ## Schritt 4: Abschluss der Übertragung auf L2 -Zu diesem Zeitpunkt wurden Ihr Subgraph und GRT auf Arbitrum empfangen, aber der Subgraph ist noch nicht veröffentlicht. Sie müssen sich mit der L2-Wallet verbinden, die Sie als empfangende Wallet gewählt haben, Ihr Wallet-Netzwerk auf Arbitrum umstellen und auf "Subgraph" veröffentlichen klicken. +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." -![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -Dadurch wird der Untergraph veröffentlicht, so dass Indexer, die auf Arbitrum arbeiten, damit beginnen können, ihn zu bedienen. Es wird auch ein Kurationssignal unter Verwendung der GRT, die von L1 übertragen wurden, eingeleitet. +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## Schritt 5: Aktualisierung der Abfrage-URL -Ihr Subgraph wurde erfolgreich zu Arbitrum übertragen! Um den Subgraphen abzufragen, wird die neue URL lauten: +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be : `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Beachten Sie, dass die ID des Subgraphen auf Arbitrum eine andere sein wird als die, die Sie im Mainnet hatten, aber Sie können sie immer im Explorer oder Studio finden. Wie oben erwähnt (siehe "Verstehen, was mit Signal, Ihrem L1-Subgraphen und Abfrage-URLs passiert"), wird die alte L1-URL noch eine kurze Zeit lang unterstützt, aber Sie sollten Ihre Abfragen auf die neue Adresse umstellen, sobald der Subgraph auf L2 synchronisiert worden ist. 
+Note that the Subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## Wie Sie Ihre Kuration auf Arbitrum übertragen (L2) -## Verstehen, was mit der Kuration bei der Übertragung von Untergraphen auf L2 geschieht +## Understanding what happens to curation on Subgraph transfers to L2 -Wenn der Eigentümer eines Untergraphen einen Untergraphen an Arbitrum überträgt, werden alle Signale des Untergraphen gleichzeitig in GRT konvertiert. Dies gilt für "automatisch migrierte" Signale, d.h. Signale, die nicht spezifisch für eine Subgraphenversion oder einen Einsatz sind, sondern der neuesten Version eines Subgraphen folgen. +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -Diese Umwandlung von Signal in GRT entspricht dem, was passieren würde, wenn der Eigentümer des Subgraphen den Subgraphen in L1 verwerfen würde. Wenn der Subgraph veraltet oder übertragen wird, werden alle Kurationssignale gleichzeitig "verbrannt" (unter Verwendung der Kurationsbindungskurve) und das resultierende GRT wird vom GNS-Smart-Contract gehalten (das ist der Vertrag, der Subgraph-Upgrades und automatisch migrierte Signale handhabt). Jeder Kurator auf diesem Subgraphen hat daher einen Anspruch auf dieses GRT proportional zu der Menge an Anteilen, die er für den Subgraphen hatte. +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph. -Ein Teil dieser GRT, der dem Eigentümer des Untergraphen entspricht, wird zusammen mit dem Untergraphen an L2 gesendet. +A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph. -Ein Teil dieser GRT, der dem Eigentümer des Untergraphen entspricht, wird zusammen mit dem Untergraphen an L2 gesendet. +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. ## Ein Teil dieser GRT, der dem Inhaber des Untergraphen entspricht, wird zusammen mit dem Untergraphen an L2 gesendet. @@ -130,9 +130,9 @@ If you're using a smart contract wallet, like a multisig (e.g. a Safe), then cho Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. 
You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough. -If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph. +If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph. -When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. +When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. ![Transfer signal](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ Wenn dies der Fall ist, müssen Sie sich mit einer L2-Wallet verbinden, die etwa ## Withdrawing your curation on L1 -If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. From 35356e4beb51822fa5db25f3695eec1f70c1a18e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:49 -0500 Subject: [PATCH 0322/1789] New translations l2-transfer-tools-guide.mdx (Italian) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/it/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/it/archived/arbitrum/l2-transfer-tools-guide.mdx index 549618bfd7c3..4a34da9bad0e 100644 --- a/website/src/pages/it/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/it/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ The Graph has made it easy to move to L2 on Arbitrum One. For each protocol part Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## How to transfer your subgraph to Arbitrum (L2) +## How to transfer your Subgraph to Arbitrum (L2) -## Benefits of transferring your subgraphs +## Benefits of transferring your Subgraphs The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. -When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. 
The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. -## Understanding what happens with signal, your L1 subgraph and query URLs +## Understanding what happens with signal, your L1 Subgraph and query URLs -Transferring a subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the subgraph to L2. The "transfer" will deprecate the subgraph on mainnet and send the information to re-create the subgraph on L2 using the bridge. It will also include the subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -When you choose to transfer the subgraph, this will convert all of the subgraph's curation signal to GRT. This is equivalent to "deprecating" the subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the subgraph, where they will be used to mint signal on your behalf. +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. If a subgraph owner does not transfer their subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. 
-As soon as the subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the subgraph. However, there will be Indexers that will 1) keep serving transferred subgraphs for 24 hours, and 2) immediately start indexing the subgraph on L2. Since these Indexers already have the subgraph indexed, there should be no need to wait for the subgraph to sync, and it will be possible to query the L2 subgraph almost immediately. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. -Queries to the L2 subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## Choosing your L2 wallet -When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -When transferring the subgraph to Arbitrum, you can choose a different wallet that will own this subgraph NFT on L2. +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same owner address as in L1. -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph. +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. 
Otherwise, the subgraph will be lost and cannot be recovered.** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## Preparing for the transfer: bridging some ETH -Transferring the subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Since gas fees on Arbitrum are lower, you should only need a small amount. It is recommended that you start at a low threshold (e.g. 0.01 ETH) for your transaction to be approved. -## Finding the subgraph Transfer Tool +## Finding the Subgraph Transfer Tool -You can find the L2 Transfer Tool when you're looking at your subgraph's page on Subgraph Studio: +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![transfer tool](/img/L2-transfer-tool1.png) -It is also available on Explorer if you're connected with the wallet that owns a subgraph and on that subgraph's page on Explorer: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Transferring to L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ Clicking on the Transfer to L2 button will open the transfer tool where you can ## Step 1: Starting the transfer -Before starting the transfer, you must decide which address will own the subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above).
+Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). -Also please note transferring the subgraph requires having a nonzero amount of signal on the subgraph with the same account that owns the subgraph; if you haven't signaled on the subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 subgraph (see "Understanding what happens with signal, your L1 subgraph and query URLs" above for more details on what goes on behind the scenes). +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Start the transfer to L2](/img/startTransferL2.png) -## Step 2: Waiting for the subgraph to get to L2 +## Step 2: Waiting for the Subgraph to get to L2 -After you start the transfer, the message that sends your L1 subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. 
@@ -80,7 +80,7 @@ Once this wait time is over, Arbitrum will attempt to auto-execute the transfer ## Step 3: Confirming the transfer -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your subgraph to L2 will be pending and require a retry within 7 days. +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. @@ -88,33 +88,33 @@ If this is the case, you will need to connect using an L2 wallet that has some E ## Step 4: Finishing the transfer on L2 -At this point, your subgraph and GRT have been received on Arbitrum, but the subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." -![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -This will publish the subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## Step 5: Updating the query URL -Your subgraph has been successfully transferred to Arbitrum! To query the subgraph, the new URL will be : +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be : `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Note that the subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the subgraph has been synced on L2. +Note that the Subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. 
As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## How to transfer your curation to Arbitrum (L2) -## Understanding what happens to curation on subgraph transfers to L2 +## Understanding what happens to curation on Subgraph transfers to L2 -When the owner of a subgraph transfers a subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph. +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph. -A fraction of these GRT corresponding to the subgraph owner is sent to L2 together with the subgraph. +A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph. -At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. ## Choosing your L2 wallet @@ -130,9 +130,9 @@ If you're using a smart contract wallet, like a multisig (e.g. a Safe), then cho Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. 
You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough. -If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph. +If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph. -When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. +When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. ![Transfer signal](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ If this is the case, you will need to connect using an L2 wallet that has some E ## Withdrawing your curation on L1 -If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. From cce23c1204ae4c5f96d1a0eaf9117143e5a48dd4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:50 -0500 Subject: [PATCH 0323/1789] New translations l2-transfer-tools-guide.mdx (Japanese) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/ja/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/ja/archived/arbitrum/l2-transfer-tools-guide.mdx index b77261989131..bc10b94ac149 100644 --- a/website/src/pages/ja/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/ja/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ title: L2 転送ツールガイド Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## サブグラフをアービトラムに転送する方法 (L2) +## How to transfer your Subgraph to Arbitrum (L2) -## サブグラフを転送する利点 +## Benefits of transferring your Subgraphs グラフのコミュニティとコア開発者は、過去1年間、Arbitrumに移行する準備をしてきました(https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305)。レイヤー2または「L2」ブロックチェーンであるアービトラムは、イーサリアムからセキュリティを継承しますが、ガス料金を大幅に削減します。 -サブグラフをThe Graph Networkに公開またはアップグレードする際には、プロトコル上のスマートコントラクトとやり取りするため、ETHを使用してガスを支払う必要があります。サブグラフをArbitrumに移動することで、将来のサブグラフのアップデートにかかるガス料金が大幅に削減されます。低い手数料と、L2のキュレーションボンディングカーブがフラットであるという点も、他のキュレーターがあなたのサブグラフをキュレーションしやすくし、サブグラフのインデクサーへの報酬を増加させます。この低コストな環境は、インデクサーがサブグラフをインデックス化して提供するコストも削減します。アービトラム上のインデックス報酬は今後数か月間で増加し、Ethereumメインネット上では減少する予定です。そのため、ますます多くのインデクサーがステークを転送し、L2での運用を設定していくことになるでしょう。 +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. 
By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. -## シグナル、L1サブグラフ、クエリURLで何が起こるかを理解する +## Understanding what happens with signal, your L1 Subgraph and query URLs -サブグラフをアービトラムに転送するには、アービトラムGRTブリッジが使用され、アービトラムGRTブリッジはネイティブアービトラムブリッジを使用してサブグラフをL2に送信します。「転送」はメインネット上のサブグラフを非推奨にし、ブリッジを使用してL2上のサブグラフを再作成するための情報を送信します。また、サブグラフ所有者のシグナル GRT も含まれ、ブリッジが転送を受け入れるには 0 より大きくなければなりません。 +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -サブグラフの転送を選択すると、サブグラフのすべてのキュレーション信号がGRTに変換されます。これは、メインネットのサブグラフを「非推奨」にすることと同じです。キュレーションに対応するGRTはサブグラフとともにL2に送信され、そこであなたに代わってシグナルを作成するために使用されます。 +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -他のキュレーターは、GRTの分数を引き出すか、同じサブグラフでシグナルをミントするためにL2に転送するかを選択できます。サブグラフの所有者がサブグラフをL2に転送せず、コントラクトコールを介して手動で非推奨にした場合、キュレーターに通知され、キュレーションを取り消すことができます。 +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -サブグラフが転送されるとすぐに、すべてのキュレーションがGRTに変換されるため、インデクサーはサブグラフのインデックス作成に対する報酬を受け取らなくなります。ただし、1) 転送されたサブグラフを24時間提供し続け、2) L2でサブグラフのインデックス作成をすぐに開始するインデクサーがあります。これらのインデクサーには既にサブグラフのインデックスが作成されているため、サブグラフが同期するのを待つ必要はなく、ほぼ即座にL2サブグラフを照会できます。 +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. -L2 サブグラフへのクエリは別の URL (「arbitrum-gateway.thegraph.com」) に対して実行する必要がありますが、L1 URL は少なくとも 48 時間は機能し続けます。その後、L1ゲートウェイはクエリをL2ゲートウェイに転送しますが(しばらくの間)、これにより遅延が増えるため、できるだけ早くすべてのクエリを新しいURLに切り替えることをお勧めします。 +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. 
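As a reference for switching queries over, the sketch below shows one way a client might target the new L2 gateway URL described above. It is only an illustration: the API key and L2 Subgraph ID are placeholder values, and the `_meta` field is used here simply as a lightweight check that the L2 Subgraph has synced before traffic is switched.

```typescript
// Sketch only: placeholder values, not a real API key or Subgraph ID.
const API_KEY = "<api-key>";
const L2_SUBGRAPH_ID = "<l2-subgraph-id>"; // shown on Explorer or Studio after the transfer

// New L2 gateway URL format described above.
const L2_URL = `https://arbitrum-gateway.thegraph.com/api/${API_KEY}/subgraphs/id/${L2_SUBGRAPH_ID}`;

async function latestIndexedBlock(url: string): Promise<number> {
  // graph-node exposes a `_meta` field reporting the latest indexed block,
  // which is a simple way to confirm the L2 Subgraph has caught up.
  const res = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query: "{ _meta { block { number } } }" }),
  });
  const { data } = await res.json();
  return data._meta.block.number;
}

latestIndexedBlock(L2_URL)
  .then((block) => console.log(`L2 Subgraph synced up to block ${block}`))
  .catch(console.error);
```

Keeping the old L1 URL as a fallback during the 48-hour window is a reasonable way to avoid downtime while the switch happens.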
## L2ウォレットの選択 -メインネットでサブグラフを公開したときに、接続されたウォレットを使用してサブグラフを作成し、このウォレットはこのサブグラフを表すNFTを所有し、更新を公開できます。 +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -サブグラフをアービトラムに転送する場合、L2でこのサブグラフNFTを所有する別のウォレットを選択できます。 +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. MetaMaskのような "通常の" ウォレット(外部所有アカウントまたはEOA、つまりスマートコントラクトではないウォレット)を使用している場合、これはオプションであり、L1と同じ所有者アドレスを保持することをお勧めします。 -マルチシグ(Safeなど)などのスマートコントラクトウォレットを使用している場合、このアカウントはメインネットにのみ存在し、このウォレットを使用してアービトラムで取引を行うことができない可能性が高いため、別のL2ウォレットアドレスを選択する必要があります。スマートコントラクトウォレットまたはマルチシグを使い続けたい場合は、Arbitrumで新しいウォレットを作成し、そのアドレスをサブグラフのL2所有者として使用します。 +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -\*\*あなたが管理し、アービトラムで取引を行うことができるウォレットアドレスを使用することは非常に重要です。そうしないと、サブグラフが失われ、復元できません。 +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## 転送の準備: 一部のETHのブリッジング -サブグラフを転送するには、ブリッジを介してトランザクションを送信し、その後アービトラム上で別のトランザクションを実行する必要があります。最初のトランザクションでは、メインネット上のETHを使用し、L2でメッセージが受信される際にガスを支払うためにいくらかのETHが含まれています。ただし、このガスが不足している場合、トランザクションを再試行し、L2で直接ガスを支払う必要があります(これが下記の「ステップ3:転送の確認」です)。このステップは、転送を開始してから7日以内に実行する必要があります。さらに、2つ目のトランザクション(「ステップ4:L2での転送の完了」)は、直接アービトラム上で行われます。これらの理由から、アービトラムウォレットに一定のETHが必要です。マルチシグまたはスマートコントラクトアカウントを使用している場合、ETHはトランザクションを実行するために使用している通常の個人のウォレット(EOAウォレット)にある必要があり、マルチシグウォレットそのものにはないことに注意してください +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. 
一部の取引所でETHを購入してアービトラムに直接引き出すか、アービトラムブリッジを使用してメインネットウォレットからL2にETHを送信することができます:[bridge.arbitrum.io](http://bridge.arbitrum.io)。アービトラムのガス料金は安いので、必要なのは少量だけです。トランザクションが承認されるには、低いしきい値(0.01 ETHなど)から始めることをお勧めします。 -## サブグラフ転送ツールの検索 +## Finding the Subgraph Transfer Tool -L2転送ツールは、サブグラフスタジオでサブグラフのページを見ているときに見つけることができます。 +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![transfer tool](/img/L2-transfer-tool1.png) -サブグラフを所有するウォレットに接続している場合は、エクスプローラーとエクスプローラーのそのサブグラフのページでも入手できます。 +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Transferring to L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ L2転送ツールは、サブグラフスタジオでサブグラフのページ ## ステップ1: 転送を開始する -転送を開始する前に、どのアドレスがL2のサブグラフを所有するかを決定する必要があり(上記の「L2ウォレットの選択」を参照)、ガス用のETHをアービトラムにすでにブリッジすることを強くお勧めします(上記の「転送の準備: ETHのブリッジング」を参照)。 +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). -また、サブグラフを転送するには、サブグラフを所有するのと同じアカウントを持つサブグラフにゼロ以外の量のシグナルが必要であることに注意してください。サブグラフでシグナルを出していない場合は、少しキュレーションを追加する必要があります(1 GRTのような少量を追加するだけで十分です)。 +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -「Transfer Tool」を開いた後、L2ウォレットアドレスを「受信ウォレットアドレス」フィールドに入力できるようになります。ここで正しいアドレスを入力していることを確認してください。「Transfer Subgraph」をクリックすると、ウォレット上でトランザクションを実行するよう求められます(注意:L2ガスの支払いに十分なETHの価値が含まれています)。これにより、トランスファーが開始され、L1サブグラフが廃止されます(詳細については、「背後で何が起こるか:シグナル、L1サブグラフ、およびクエリURLの理解」を参照してください)。 +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -このステップを実行する場合は、\*\*7日以内にステップ3を完了するまで続行してください。そうしないと、サブグラフとシグナルGRTが失われます。 これは、L1-L2メッセージングがアービトラムでどのように機能するかによるものです: ブリッジを介して送信されるメッセージは、7日以内に実行する必要がある「再試行可能なチケット」であり、アービトラムのガス価格に急上昇がある場合は、最初の実行で再試行が必要になる場合があります。 +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Start the transfer to L2](/img/startTransferL2.png) -## ステップ2: サブグラフがL2に到達するのを待つ +## Step 2: Waiting for the Subgraph to get to L2 -転送を開始した後、L1サブグラフをL2に送信するメッセージは、アービトラムブリッジを介して伝播する必要があります。これには約20分かかります(ブリッジは、トランザクションを含むメインネットブロックが潜在的なチェーン再編成から「安全」になるまで待機します)。 +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). 
この待機時間が終了すると、アービトラムはL2契約の転送の自動実行を試みます。 @@ -80,7 +80,7 @@ L2転送ツールは、サブグラフスタジオでサブグラフのページ ## ステップ3: 転送の確認 -ほとんどの場合、ステップ1に含まれるL2ガスは、アービトラム契約のサブグラフを受け取るトランザクションを実行するのに十分であるため、このステップは自動実行されます。ただし、場合によっては、アービトラムのガス価格の急騰により、この自動実行が失敗する可能性があります。この場合、サブグラフをL2に送信する「チケット」は保留中であり、7日以内に再試行する必要があります。 +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. この場合、アービトラムにETHがあるL2ウォレットを使用して接続し、ウォレットネットワークをアービトラムに切り替え、[転送の確認] をクリックしてトランザクションを再試行する必要があります。 @@ -88,33 +88,33 @@ L2転送ツールは、サブグラフスタジオでサブグラフのページ ## ステップ4: L2での転送の完了 -この時点で、サブグラフとGRTはアービトラムで受信されましたが、サブグラフはまだ公開されていません。受信ウォレットとして選択したL2ウォレットを使用して接続し、ウォレットネットワークをArbitrumに切り替えて、[サブグラフの公開] をクリックする必要があります。 +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." -![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -これにより、アービトラムで動作しているインデクサーがサブグラフの提供を開始できるように、サブグラフが公開されます。また、L1から転送されたGRTを使用してキュレーションシグナルをミントします。 +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## ステップ 5: クエリ URL の更新 -サブグラフがアービトラムに正常に転送されました! サブグラフを照会するには、新しい URL は次のようになります: +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be : `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -アービトラム上のサブグラフIDは、メインネット上でのものとは異なることに注意してください。ただし、エクスプローラやスタジオ上で常にそのIDを見つけることができます(詳細は「シグナル、L1サブグラフ、およびクエリURLの動作理解」を参照)。前述のように、古いL1 URLはしばらくの間サポートされますが、サブグラフがL2上で同期されたらすぐに新しいアドレスにクエリを切り替える必要があります。 +Note that the Subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## キュレーションをアービトラム(L2) に転送する方法 -## L2へのサブグラフ転送のキュレーションに何が起こるかを理解する +## Understanding what happens to curation on Subgraph transfers to L2 -サブグラフの所有者がサブグラフをアービトラムに転送すると、サブグラフのすべての信号が同時にGRTに変換されます。これは、「自動移行」シグナル、つまりサブグラフのバージョンまたはデプロイに固有ではないが、サブグラフの最新バージョンに従うシグナルに適用されます。 +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. 
-このシグナルからGRTへの変換は、サブグラフのオーナーがL1でサブグラフを非推奨にした場合と同じです。サブグラフが非推奨化または移管されると、すべてのキュレーションシグナルは同時に(キュレーションボンディングカーブを使用して)「燃やされ」、その結果得られるGRTはGNSスマートコントラクトに保持されます(これはサブグラフのアップグレードと自動移行されるシグナルを処理するコントラクトです)。そのため、そのサブグラフの各キュレーターは、所持していたシェアの量に比例したGRTの請求権を持っています。 +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph. -サブグラフの所有者に対応するこれらの GRT の一部は、サブグラフとともに L2 に送信されます。 +A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph. -この時点では、キュレートされたGRTはこれ以上のクエリ手数料を蓄積しません。したがって、キュレーターは自分のGRTを引き出すか、それをL2上の同じサブグラフに移動して新しいキュレーションシグナルを作成するために使用することができます。いつ行うかに関わらず、GRTは無期限に保持でき、すべての人が自分のシェアに比例した額を受け取ることができるため、急ぐ必要はありません。 +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. ## L2ウォレットの選択 @@ -130,9 +130,9 @@ L2転送ツールは、サブグラフスタジオでサブグラフのページ 転送を開始する前に、L2上でキュレーションを所有するアドレスを決定する必要があります(上記の「L2ウォレットの選択」を参照)。また、L2でメッセージの実行を再試行する必要がある場合に備えて、ガスのためにすでにArbitrumにブリッジされたいくらかのETHを持つことをお勧めします。ETHをいくつかの取引所で購入し、それを直接Arbitrumに引き出すことができます。または、Arbitrumブリッジを使用して、メインネットのウォレットからL2にETHを送信することもできます: [bridge.arbitrum.io](http://bridge.arbitrum.io)。Arbitrumのガス料金が非常に低いため、0.01 ETHなどの少額で十分です。 -もしキュレーションしているサブグラフがL2に移行された場合、エクスプローラ上でそのサブグラフが移行されたことを示すメッセージが表示されます。 +If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph. -サブグラフのページを表示する際に、キュレーションを引き出すか、移行するかを選択できます。"Transfer Signal to Arbitrum" をクリックすると、移行ツールが開きます。 +When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. ![Transfer signal](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ L2転送ツールは、サブグラフスタジオでサブグラフのページ ## L1 でキュレーションを取り消す -GRT を L2 に送信したくない場合、または GRT を手動でブリッジしたい場合は、L1 でキュレーションされた GRT を取り消すことができます。サブグラフページのバナーで、「シグナルの引き出し」を選択し、トランザクションを確認します。GRTはあなたのキュレーターアドレスに送信されます。 +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. 
From f07e2a6b2c9fe509c46095e7c74de8d91e72e026 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:51 -0500 Subject: [PATCH 0324/1789] New translations l2-transfer-tools-guide.mdx (Korean) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/ko/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/ko/archived/arbitrum/l2-transfer-tools-guide.mdx index 549618bfd7c3..4a34da9bad0e 100644 --- a/website/src/pages/ko/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/ko/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ The Graph has made it easy to move to L2 on Arbitrum One. For each protocol part Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## How to transfer your subgraph to Arbitrum (L2) +## How to transfer your Subgraph to Arbitrum (L2) -## Benefits of transferring your subgraphs +## Benefits of transferring your Subgraphs The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. -When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. -## Understanding what happens with signal, your L1 subgraph and query URLs +## Understanding what happens with signal, your L1 Subgraph and query URLs -Transferring a subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the subgraph to L2. The "transfer" will deprecate the subgraph on mainnet and send the information to re-create the subgraph on L2 using the bridge. 
It will also include the subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -When you choose to transfer the subgraph, this will convert all of the subgraph's curation signal to GRT. This is equivalent to "deprecating" the subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the subgraph, where they will be used to mint signal on your behalf. +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. If a subgraph owner does not transfer their subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -As soon as the subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the subgraph. However, there will be Indexers that will 1) keep serving transferred subgraphs for 24 hours, and 2) immediately start indexing the subgraph on L2. Since these Indexers already have the subgraph indexed, there should be no need to wait for the subgraph to sync, and it will be possible to query the L2 subgraph almost immediately. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. -Queries to the L2 subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. 
## Choosing your L2 wallet -When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -When transferring the subgraph to Arbitrum, you can choose a different wallet that will own this subgraph NFT on L2. +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same owner address as in L1. -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph. +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the subgraph will be lost and cannot be recovered.** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## Preparing for the transfer: bridging some ETH -Transferring the subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. 
For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Since gas fees on Arbitrum are lower, you should only need a small amount. It is recommended that you start at a low threshold (e.g. 0.01 ETH) for your transaction to be approved. -## Finding the subgraph Transfer Tool +## Finding the Subgraph Transfer Tool -You can find the L2 Transfer Tool when you're looking at your subgraph's page on Subgraph Studio: +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![transfer tool](/img/L2-transfer-tool1.png) -It is also available on Explorer if you're connected with the wallet that owns a subgraph and on that subgraph's page on Explorer: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Transferring to L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ Clicking on the Transfer to L2 button will open the transfer tool where you can ## Step 1: Starting the transfer -Before starting the transfer, you must decide which address will own the subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommended to have some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). -Also please note transferring the subgraph requires having a nonzero amount of signal on the subgraph with the same account that owns the subgraph; if you haven't signaled on the subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 subgraph (see "Understanding what happens with signal, your L1 subgraph and query URLs" above for more details on what goes on behind the scenes). +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes).
-If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Start the transfer to L2](/img/startTransferL2.png) -## Step 2: Waiting for the subgraph to get to L2 +## Step 2: Waiting for the Subgraph to get to L2 -After you start the transfer, the message that sends your L1 subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. @@ -80,7 +80,7 @@ Once this wait time is over, Arbitrum will attempt to auto-execute the transfer ## Step 3: Confirming the transfer -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your subgraph to L2 will be pending and require a retry within 7 days. +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. @@ -88,33 +88,33 @@ If this is the case, you will need to connect using an L2 wallet that has some E ## Step 4: Finishing the transfer on L2 -At this point, your subgraph and GRT have been received on Arbitrum, but the subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." 
-![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -This will publish the subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## Step 5: Updating the query URL -Your subgraph has been successfully transferred to Arbitrum! To query the subgraph, the new URL will be : +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be: `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Note that the subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the subgraph has been synced on L2. +Note that the Subgraph ID on Arbitrum will be different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## How to transfer your curation to Arbitrum (L2) -## Understanding what happens to curation on subgraph transfers to L2 +## Understanding what happens to curation on Subgraph transfers to L2 -When the owner of a subgraph transfers a subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph. +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal).
Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph. -A fraction of these GRT corresponding to the subgraph owner is sent to L2 together with the subgraph. +A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph. -At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be held indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. ## Choosing your L2 wallet @@ -130,9 +130,9 @@ If you're using a smart contract wallet, like a multisig (e.g. a Safe), then cho Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough. -If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph. +If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph. -When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. +When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. ![Transfer signal](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ If this is the case, you will need to connect using an L2 wallet that has some E ## Withdrawing your curation on L1 -If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address.
From d2b1e0f1ebc1b7b8986e0f131e307f3faea4b178 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:52 -0500 Subject: [PATCH 0325/1789] New translations l2-transfer-tools-guide.mdx (Dutch) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/nl/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/nl/archived/arbitrum/l2-transfer-tools-guide.mdx index 67a7011010e7..d8828c547837 100644 --- a/website/src/pages/nl/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/nl/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ The Graph heeft het eenvoudig gemaakt om naar L2 op Arbitrum One over te stappen Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## Hoe zet je je subgraph over naar Arbitrum (L2) +## How to transfer your Subgraph to Arbitrum (L2) -## Voordelen van het overzetten van uw subgraphs +## Benefits of transferring your Subgraphs De community en ontwikkelaars van The Graph hebben [zich voorbereid](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) op de transitie naar Arbitrum gedurende het afgelopen jaar. Arbitrum, een layer 2 of "L2" blockchain, erft de beveiliging van Ethereum maar biedt aanzienlijk lagere gas fees. -Wanneer je je subgraph publiceert of bijwerkt naar the Graph Network, interacteer je met smart contracts op het protocol en dit vereist het betalen van gas met ETH. Door je subgraphs naar Arbitrum te verplaatsen, zullen eventuele toekomstige updates aan de subgraph veel lagere gas fees vereisen. De lagere kosten, en het feit dat de curatie bonding curves op L2 vlak zijn, maken het ook makkelijker voor andere curatoren om te cureren op uw subgraph, waardoor de beloningen voor indexeerders op uw subgraph toenemen. Deze omgeving met lagere kosten maakt het ook goedkoper voor indexeerders om de subgraph te indexeren en query's te beantwoorden. Indexeringsbeloningen zullen op Arbitrum toenemen en op Ethereum mainnet afnemen in de komden maanden, dus meer en meer indexeerders zullen hun GRT overzetten en hun operaties op L2 opzetten. +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. -## Begrijpen wat er gebeurt met signalen, de L1 subgraph en query URL's +## Understanding what happens with signal, your L1 Subgraph and query URLs -Het overzetten van een subgraph naar Arbitrum gebruikt de Arbitrum GRT brug, die op zijn beurt de natuurlijke Arbitrum brug gebruikt, om de subgraph naar L2 te sturen. 
De "transfer" zal de subgraph op mainnet verwijderen en de informatie versturen om de subgraph op L2 opnieuw te creëren met de bridge. Het zal ook de gesignaleerde GRT van de eigenaar van de subgraph bevatten, wat meer dan nul moet zijn voor de brug om de overdracht te accepteren. +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -Wanneer je kiest om de subgraph over te dragen, zal dit alle curatie van de subgraph omzetten in GRT. Dit staat gelijk aan het "degraderen" van de subgraph op mainnet. De GRT die overeenkomt met je curatie zal samen met de subgraph naar L2 worden gestuurd, waar ze zullen worden gebruikt om signaal namens u te munten. +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -Andere curatoren kunnen kiezen of ze hun fractie van GRT willen opnemen, of het ook naar L2 willen overzetten om signaal op dezelfde subgraph te munten. Als een eigenaar van een subgraph hun subgraph niet naar L2 overzet en handmatig verwijderd via een contract call, dan zullen curatoren worden genotificeerd en zullen ze in staat zijn om hun curatie op te nemen. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -Zodra de subgraph is overgedragen, aangezien alle curatie is omgezet in GRT, zullen indexeerders geen beloningen meer ontvangen voor het indexeren van de subgraph. Er zullen echter indexeerders zijn die 1) overgedragen subgraphs 24 uur blijven ondersteunen, en 2) onmiddelijk beginnen met het indexeren van de subgraph op L2. Aangezien deze indexeerders de subgraph al hebben geïndexeerd, zou er geen noodzaak moeten zijn om te wachten tot de subgraph is gesynchroniseerd, en het zal mogelijk zijn om de L2 subgraph bijna onmiddelijk te queryen. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. -Query's naar de L2 subgraph moeten worden gedaan naar een andere URL (op `arbitrum-gateway.thegraph.com`), maar het L1 URL zal minimaal 48 uur blijven werken. Daarna zal de L1 gateway query's doorsturen naar de L2 gateway (voor enige tijd), maar dit zal latentie toevoegen dus het wordt aanbevolen om al uw query's zo snel mogelijk naar de nieuwe URL over te schakelen. +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. 
After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## Jouw L2 wallet kiezen -Wanneer je je subgraph op mainnet publiceerde, gebruikte je een verbonden wallet om de subgraph te creëren, en deze wallet bezit de NFT die deze subgraph vertegenwoordigt en dit zorgt er voor dat je updates kunt publiceren. +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -Bij het overzetten van de subgraph naar Arbitrum, kunt u een andere wallet kiezen die deze subgraph NFT op L2 zal bezitten. +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. Als je een "reguliere" wallet gebruikt zoals MetaMask (een Externally Owned Account of EOA, d.w.z. een wallet die geen smart contract is), dan is dit optioneel en wordt het aanbevolen om dezelfde wallet te gebruiken als in L1. -Als je een smart contract wallet gebruikt, zoals een multisig (bijv. een Safe) dan is het kiezen van een ander L2 wallet adres verplicht, aangezien het waarschijnlijk is dat de multisig alleen op mainnet bestaat en je geen transacties op Arbitrum kunt maken met deze wallet. Als je een smart contract wallet of multisig wilt blijven gebruiken, maak dan een nieuwe wallet aan op Arbitrum en gebruik het adres ervan als de L2 eigenaar van jouw subgraph. +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**Het is erg belangrijk om een wallet adres te gebruiken dat u controleert, en dat transacties op Arbitrum kan maken. Anders zal de subgraph verloren gaan en kan niet worden hersteld.** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## Voorbereiden op de overdracht: ETH verplaatsen van L1 naar L2 -Het overzetten van de subgraph houdt in dat je een transactie verstuurt via de brug, en vervolgens een andere transactie uitvoert op Arbitrum. De eerste transactie gebruikt ETH op mainnet, en bevat wat ETH om te betalen voor gas wanneer het op L2 wordt ontvangen. Echter, als dit onvoldoende is, zul je de transactie opnieuw moeten proberen en betalen voor het gas direct op L2 (dit is "Stap 3: De overdracht bevestigen" hieronder). Deze stap **moet worden uitgevoerd binnen 7 dagen na het starten van de overdracht**. Bovendien, de tweede transactie ("Stap 4: De overdracht op L2 afronden") zal direct op Arbitrum worden gedaan. Om deze redenen, zul je wat ETH nodig hebben op een Arbitrum wallet. Als je een multisig of smart contract wallet gebruikt, zal de ETH in de reguliere (EOA) wallet moeten zijn die je gebruikt om de transacties uit te voeren, niet op de multisig wallet zelf. +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. 
However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. Je kunt ETH kopen op sommige exchanges en direct naar Arbitrum opnemen, of je kunt de Arbitrum bridge gebruiken om ETH van een mainnet wallet naar L2 te sturen: [bridge.arbitrum.io](http://bridge.arbitrum.io). Aangezien de gasprijzen op Arbitrum lager zijn, zou u slechts een kleine hoeveelheid nodig moeten hebben. Het wordt aanbevolen om te beginnen met een lage drempel (e.g. 0.1 ETH) voor uw transactie om te worden goedgekeurd. -## Het vinden van de Transfer Tool voor subgraphs +## Finding the Subgraph Transfer Tool -Je kunt de L2 Transfer Tool vinden als je naar de pagina van je subgraph kijkt in de Subgraph Studio: +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![transfer tool](/img/L2-transfer-tool1.png) -Het is ook beschikbaar in de Explorer als je verbonden bent met de wallet die een subgraph bezit en op de pagina van die subgraph in de Explorer kijkt: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Transferring to L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ Door op de knop 'Transfer to L2' te klikken, wordt de Transfer Tool geopend waar ## Stap 1: Het transfer proces starten -Voordat je met het transfer proces begint, moet je beslissen welk adres de subgraph op L2 zal bezitten (zie "Je L2 portemonnee kiezen" hierboven), en het wordt sterk aanbevolen om al wat ETH voor gas op Arbitrum te hebben (zie "Voorbereiden op de overdracht: ETH verplaatsen van L1 naar L2" hierboven). +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommended to have some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). -Let ook op dat het overzetten van de subgraph vereist dat je een hoeveelheid signaal groter dan nul op de subgraph hebt met dezelfde account die de subgraph bezit; als je nog geen signaal op de subgraph hebt, moet je een klein beetje curatie toevoegen (een kleine hoeveelheid zoals 1 GRT zou voldoende zijn). +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -Na het openen van de Transfer Tool, kun je het adres van de L2 wallet invoeren in het veld "Receiving wallet address" - **zorg ervoor dat je het juiste adres hier invoert**. Door op 'Transfer Subgraph' te klikken, wordt je gevraagd de transactie op je wallet uit te voeren (let op dat er wel wat ETH in je wallet zit om te betalen voor L2 gas); dit zal de transfer initiëren en je L1 subgraph verwijderen (zie "Begrijpen wat er gebeurt met signalen, de L1 subgraph en query URL's" hierboven voor meer details over wat er achter de schermen gebeurt).
+After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -Als je deze stap uitvoert, **zorg ervoor dat je doorgaat tot het voltooien van stap 3 in minder dan 7 dagen, of de subgraph en je signaal GRT zullen verloren gaan.** Dit komt door hoe L1-L2 berichtgeving werkt op Arbitrum: berichten die via de bridge worden verzonden, zijn "retry-able tickets" die binnen 7 dagen uitgevoerd moeten worden, en de initiële uitvoering zou een nieuwe poging nodig kunnen hebben als er pieken zijn in de prijs voor gas op Arbitrum. +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Start the transfer to L2](/img/startTransferL2.png) -## Stap 2: Wachten tot de transfer van de subgraph naar L2 voltooid is +## Step 2: Waiting for the Subgraph to get to L2 -Nadat je de transfer gestart bent, moet het bericht dat je L1-subgraph naar L2 stuurt, via de Arbitrum brug worden doorgestuurd. Dit duurt ongeveer 20 minuten (de brug wacht tot het mainnet block dat de transactie bevat "veilig" is van potentiële chain reorganisaties). +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). Zodra deze wachttijd voorbij is, zal Arbitrum proberen de transfer automatisch uit te voeren op de L2 contracten. @@ -80,7 +80,7 @@ Zodra deze wachttijd voorbij is, zal Arbitrum proberen de transfer automatisch u ## Stap 3: De transfer bevestigen -In de meeste gevallen zal deze stap automatisch worden uitgevoerd aangezien de L2 gas kosten die bij stap 1 zijn inbegrepen, voldoende zouden moeten zijn om de transactie die de subgraph op de Arbitrum contracten ontvangt, uit te voeren. In sommige gevallen kan het echter zo zijn dat een piek in de gasprijzen op Arbitrum ervoor zorgt dat deze automatische uitvoering mislukt. In dat geval zal het "ticket" dat je subgraph naar L2 stuurt, in behandeling blijven en is nodig het binnen 7 dagen nogmaals te proberen. +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. Als dit het geval is, moet je verbinding maken met een L2 wallet die wat ETH op Arbitrum heeft, je walletnetwerk naar Arbitrum overschakelen en op "Bevestig Transfer" klikken op de transactie opnieuw te proberen. 
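The 7-day warning above is measured from the L1 transaction that starts the transfer, so it can help to note the latest moment by which step 3 has to be confirmed. A rough sketch, assuming a placeholder Ethereum mainnet JSON-RPC endpoint and a placeholder transaction hash (neither comes from this guide):

```ts
// Sketch only: RPC_URL and TX_HASH are placeholders, not values from this guide.
const RPC_URL = "https://ethereum-rpc.example";
const TX_HASH = "0x..."; // hash of the L1 transaction that started the transfer

async function rpc(method: string, params: unknown[]) {
  const res = await fetch(RPC_URL, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ jsonrpc: "2.0", id: 1, method, params }),
  });
  return (await res.json()).result;
}

async function retryDeadline() {
  // Look up the block that included the transfer transaction and read its timestamp.
  const receipt = await rpc("eth_getTransactionReceipt", [TX_HASH]);
  const block = await rpc("eth_getBlockByNumber", [receipt.blockNumber, false]);
  const startedAtMs = Number(block.timestamp) * 1000; // timestamp is hex-encoded seconds
  const deadline = new Date(startedAtMs + 7 * 24 * 60 * 60 * 1000);
  console.log(`Complete step 3 before ${deadline.toISOString()}`);
}

retryDeadline();
```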
@@ -88,33 +88,33 @@ Als dit het geval is, moet je verbinding maken met een L2 wallet die wat ETH op ## Stap 4: De transfer op L2 afronden -Na de vorige stappen zijn je subgraph en GRT ontvangen op Arbitrum, maar de subgraph is nog niet gepubliceerd. Je moet verbinding maken met de L2 wallet die je hebt gekozen als ontvangende wallet, je walletnetwerk naar Arbitrum overschakelen en op "Publiceer Subgraph" klikken +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." -![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -Dit zal de subgraph publiceren zodat Indexeerders die op Arbitrum actief zijn, deze kunnen indexeren. Het zal ook curatie signaal munten met de GRT die van L1 zijn overgedragen. +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## Stap 5: De query-URL bijwerken -Je subgraph is succesvol overgedragen naar Arbitrum! Om query's naar de subgraph te sturen, kun je deze nieuwe URL gebruiken: +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be: `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Let op dat de subgraph ID op Arbitum anders zal zijn dan degene die je op mainnet had, maar je kunt deze altijd vinden op de Explorer of in de Studio. Zoals hierboven vermeld (zie "Begrijpen wat er gebeurt met signalen, de L1 subgraph en query URL's") zal de oude L1-URL nog een korte tijd worden ondersteund, maar je zou zo snel mogelijk al je query's naar het nieuwe adres moeten overschakelen zodra de subgraph op L2 is gesynchroniseerd. +Note that the Subgraph ID on Arbitrum will be different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## Hoe je je curatie signaal naar Arbitrum (L2) overzet -## Begrijpen wat er gebeurt met curatie bij subgraph transfers naar L2 +## Understanding what happens to curation on Subgraph transfers to L2 -Wanneer de eigenaar van een subgraph een subgraph naar Arbitrum verplaatst, wordt al het signaal van de subgraph tegelijkertijd omgezet in GRT. Dit is van toepassing op "automatisch gemigreerd" signaal, dus signaal dat niet specifiek is voor een subgraph versie, maar automatisch op de nieuwste versie van de subgraph signaleerd. +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -De conversie van signaal naar GRT is hetzelfde als wat zou gebeuren als de eigenaar van de subgraph de subgraph van L1 zou verwijderen.
Wanneer de subgraph wordt verwijderd of verplaatst wordt naar L2, wordt al het curatie signaal tegelijkertijd "verbrand" (met behulp van de curation bonding curve) en wordt de GRT vastgehouden door het GNS smart contract (dat is het contract dat subgraph upgrades en automatisch gemigreerd signaal afhandeld). Elke Curator op die subgraph heeft daarom recht op die GRT naar rato van het aantal aandelen dat ze voor de subgraph hadden. +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph. -Een deel van de GRT, dat behoort tot de eigenaar van de subgraph, wordt samen met de subgraph naar L2 gestuurd. +A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph. -Op dit punt zal de gesignaleerde GRT niet langer query kosten verzamelen, dus curatoren kunnen kiezen om hun GRT op te nemen of het naar dezelfde subgraph op L2 over te dragen, waar het gebruikt kan worden om nieuw curatie signaal te creëren. Er is geen haast bij, aangezien de GRT voor onbepaalde tijd kan worden bewaard en iedereen krijgt een hoeveelheid naar rato van hun aandelen, ongeacht wanneer ze het doen. +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be held indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. ## Jouw L2 wallet kiezen @@ -130,9 +130,9 @@ Als je een smart contract wallet gebruikt, zoals een multisig (bijv. een Safe) d Voordat je de transfer start, moet je beslissen welk wallet adres de curatie op L2 zal bezitting (zie "De L2 wallet kiezen" hierboven) en wordt het aanbevolen om al wat ETH voor gas op Arbitrum te hebben voor het geval je de uitvoering van het bericht op L2 opnieuw moet uitvoeren. Je kunt ETH kopen op sommige beurzen en deze rechstreeks naar je Arbitrum wallet sturen, of je kunt de Arbitrum bridge gebruiken om ETH van een mainnet wallet naar L2 te sturen: [bridge.arbitrum.io](http://bridge.arbitrum.io) - aangezien de gasprijzen op Arbitrum zo laag zijn, heb je waarschijnlijk maar een kleine hoeveelheid nodig, 0.01 ETH is waarschijnlijk meer dan genoeg. -Als een subgraph waar je curatie signaal op hebt naar L2 is verstuurd, zie je een bericht op de Explorer die je verteld dat je curatie hebt op een subgraph die een transfer heeft gemaakt naar L2. +If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph. -Wanneer je naar de subgraph pagina kijkt, kun je ervoor kiezen om de curatie op te nemen of over te dragen naar L2. Door op "Transfer Signal to Arbitrum" te klikken, worden de Transfer Tools geopend.
![Transfer signal](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ Als dit het geval is, moet je verbinding maken met een L2 wallet die wat ETH op ## Jouw curatie opnemen op L1 -Als je je GRT liever niet naar L2 stuurt, of als je de GRT handmatig over de brug wilt sturen, kunt je je gecureerde GRT op L1 opnemen. Kies op de banner op de subgraph pagina "Withdraw Signal" en bevestig de transactie; de GRT wordt naar uw Curator adres gestuurd. +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. From 361862e89f8a5d1dcbae05672daf6e0aa98623a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:53 -0500 Subject: [PATCH 0326/1789] New translations l2-transfer-tools-guide.mdx (Polish) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/pl/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/pl/archived/arbitrum/l2-transfer-tools-guide.mdx index 2e4e4050450e..91e2f52b8525 100644 --- a/website/src/pages/pl/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/pl/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ Graph ułatwił przeniesienie danych do L2 na Arbitrum One. Dla każdego uczestn Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## Jak przenieść swój subgraph do Arbitrum (L2) +## How to transfer your Subgraph to Arbitrum (L2) -## Benefits of transferring your subgraphs +## Benefits of transferring your Subgraphs Społeczność i deweloperzy Graph [przygotowywali się](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) do przejścia na Arbitrum w ciągu ostatniego roku. Arbitrum, blockchain warstwy 2 lub "L2", dziedziczy bezpieczeństwo po Ethereum, ale zapewnia znacznie niższe opłaty za gaz. -When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. 
This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. -## Understanding what happens with signal, your L1 subgraph and query URLs +## Understanding what happens with signal, your L1 Subgraph and query URLs -Transferring a subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the subgraph to L2. The "transfer" will deprecate the subgraph on mainnet and send the information to re-create the subgraph on L2 using the bridge. It will also include the subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -When you choose to transfer the subgraph, this will convert all of the subgraph's curation signal to GRT. This is equivalent to "deprecating" the subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the subgraph, where they will be used to mint signal on your behalf. +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. If a subgraph owner does not transfer their subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -As soon as the subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the subgraph. However, there will be Indexers that will 1) keep serving transferred subgraphs for 24 hours, and 2) immediately start indexing the subgraph on L2. Since these Indexers already have the subgraph indexed, there should be no need to wait for the subgraph to sync, and it will be possible to query the L2 subgraph almost immediately. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. 
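Since transferred Subgraphs can be queried on L2 almost immediately, it can be useful to point a test query at the new gateway endpoint early on. A minimal sketch, using the gateway URL format shown later in this guide with the API key and Subgraph ID left as placeholders; the `_meta` block is a standard graph-node field that reports how far a deployment has synced:

```ts
// Sketch only: replace the [api-key] and [l2-subgraph-id] placeholders with real values.
const L2_QUERY_URL =
  "https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]";

async function checkL2Subgraph() {
  const res = await fetch(L2_QUERY_URL, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      query: "{ _meta { block { number } hasIndexingErrors } }",
    }),
  });
  const { data, errors } = await res.json();
  if (errors) throw new Error(JSON.stringify(errors));
  console.log("L2 deployment synced up to block", data._meta.block.number);
}

checkL2Subgraph();
```

Comparing that block number with the chain head, or with the same query against the old L1 URL, is one way to decide when to move production traffic over.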
-Queries to the L2 subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## Choosing your L2 wallet -When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -When transferring the subgraph to Arbitrum, you can choose a different wallet that will own this subgraph NFT on L2. +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same owner address as in L1. -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph. +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the subgraph will be lost and cannot be recovered.** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## Preparing for the transfer: bridging some ETH -Transferring the subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. 
If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Since gas fees on Arbitrum are lower, you should only need a small amount. It is recommended that you start at a low threshold (e.g. 0.01 ETH) for your transaction to be approved. -## Finding the subgraph Transfer Tool +## Finding the Subgraph Transfer Tool -You can find the L2 Transfer Tool when you're looking at your subgraph's page on Subgraph Studio: +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![transfer tool](/img/L2-transfer-tool1.png) -It is also available on Explorer if you're connected with the wallet that owns a subgraph and on that subgraph's page on Explorer: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Transferring to L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ Clicking on the Transfer to L2 button will open the transfer tool where you can ## Step 1: Starting the transfer -Before starting the transfer, you must decide which address will own the subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommended to have some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). -Also please note transferring the subgraph requires having a nonzero amount of signal on the subgraph with the same account that owns the subgraph; if you haven't signaled on the subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**.
Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 subgraph (see "Understanding what happens with signal, your L1 subgraph and query URLs" above for more details on what goes on behind the scenes). +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Start the transfer to L2](/img/startTransferL2.png) -## Step 2: Waiting for the subgraph to get to L2 +## Step 2: Waiting for the Subgraph to get to L2 -After you start the transfer, the message that sends your L1 subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. @@ -80,7 +80,7 @@ Once this wait time is over, Arbitrum will attempt to auto-execute the transfer ## Step 3: Confirming the transfer -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your subgraph to L2 will be pending and require a retry within 7 days. +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. 
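If a retry is needed, the wallet signing it has to hold ETH on Arbitrum One, and the same applies to the publish transaction in step 4. A quick way to sanity-check that balance, assuming the commonly used public Arbitrum One JSON-RPC endpoint (substitute your own provider) and a placeholder address:

```ts
// Sketch only: the endpoint and address are assumptions, not values from this guide.
const ARBITRUM_RPC = "https://arb1.arbitrum.io/rpc";
const WALLET = "0xYourEOAAddress";

async function hasGasOnArbitrum(): Promise<boolean> {
  const res = await fetch(ARBITRUM_RPC, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      jsonrpc: "2.0",
      id: 1,
      method: "eth_getBalance",
      params: [WALLET, "latest"],
    }),
  });
  const { result } = await res.json();
  // The balance comes back as hex-encoded wei; a float is fine for a sanity check.
  const eth = Number(BigInt(result)) / 1e18;
  console.log(`Balance on Arbitrum: ${eth} ETH`);
  return eth > 0.001; // rough threshold, since gas on Arbitrum is cheap
}
```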
If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. @@ -88,33 +88,33 @@ If this is the case, you will need to connect using an L2 wallet that has some E ## Step 4: Finishing the transfer on L2 -At this point, your subgraph and GRT have been received on Arbitrum, but the subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." -![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -This will publish the subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## Step 5: Updating the query URL -Your subgraph has been successfully transferred to Arbitrum! To query the subgraph, the new URL will be : +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be: `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Note that the subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the subgraph has been synced on L2. +Note that the Subgraph ID on Arbitrum will be different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## How to transfer your curation to Arbitrum (L2) -## Understanding what happens to curation on subgraph transfers to L2 +## Understanding what happens to curation on Subgraph transfers to L2 -When the owner of a subgraph transfers a subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph. +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph.
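The conversion described above is purely mechanical: the burned signal becomes a pool of GRT held by the GNS contract, and, as the next paragraphs spell out, each Curator's claim on that pool is proportional to their curation shares. A toy illustration of that split, using made-up numbers rather than anything from this guide:

```ts
// Toy numbers for illustration only; real values live in the GNS contract state.
const grtFromBurnedSignal = 10_000; // GRT held by GNS after the Subgraph's signal was burned
const totalCurationShares = 500; // total curation shares that existed on the Subgraph
const myCurationShares = 25; // shares held by one Curator

// Each Curator's withdrawable (or transferable) amount is proportional to their shares.
const myClaim = grtFromBurnedSignal * (myCurationShares / totalCurationShares);
console.log(`Claimable GRT: ${myClaim}`); // 500 GRT in this example
```

Whether that amount is withdrawn on L1 or used to mint new signal on L2, the proportion stays the same, which is why there is no rush to act.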
-This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph. -A fraction of these GRT corresponding to the subgraph owner is sent to L2 together with the subgraph. +A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph. -At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be held indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. ## Choosing your L2 wallet @@ -130,9 +130,9 @@ If you're using a smart contract wallet, like a multisig (e.g. a Safe), then cho Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough. -If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph. +If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph. -When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. +When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool.
![Transfer signal](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ If this is the case, you will need to connect using an L2 wallet that has some E ## Withdrawing your curation on L1 -If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. From f5236fd0b8de70d076bc6c20705c8f40311bd166 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:54 -0500 Subject: [PATCH 0327/1789] New translations l2-transfer-tools-guide.mdx (Portuguese) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/pt/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/pt/archived/arbitrum/l2-transfer-tools-guide.mdx index a6a744aeeb19..320c947532a4 100644 --- a/website/src/pages/pt/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/pt/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ O The Graph facilitou muito o processo de se mudar para a L2 no Arbitrum One. Pa Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## Como transferir o seu subgraph ao Arbitrum (L2) +## How to transfer your Subgraph to Arbitrum (L2) -## Benefícios de transferir os seus subgraphs +## Benefits of transferring your Subgraphs A comunidade e os programadores centrais do The Graph andaram [preparando](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) as suas mudanças ao Arbitrum ao longo do último ano. O Arbitrum, uma blockchain layer 2, ou "L2", herda a segurança do Ethereum, mas providencia taxas de gas muito menores. -Ao publicar ou atualizar o seu subgraph na Graph Network, você interaje com contratos inteligentes no protocolo, e isto exige o pagamento de gas usando ETH. Ao mover os seus subgraphs ao Arbitrum, quaisquer atualizações futuras ao seu subgraph exigirão taxas de gas muito menores. As taxas menores, e o fato de que bonding curves de curadoria na L2 são planas, também facilitarão a curadoria no seu subgraph para outros Curadores, a fim de aumentar as recompensas para Indexadores no seu subgraph. Este ambiente de custo reduzido também barateia a indexação e o serviço de Indexadores no seu subgraph. As recompensas de indexação também aumentarão no Arbitrum e decairão na mainnet do Ethereum nos próximos meses, então mais e mais Indexadores transferirão o seu stake e preparando as suas operações na L2. +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. 
This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. -## Como entender o que acontece com o sinal, o seu subgraph na L1 e URLs de query +## Understanding what happens with signal, your L1 Subgraph and query URLs -Transferir um subgraph ao Arbitrum usa a bridge de GRT do Arbitrum, que por sua vez usa a bridge nativa do Arbitrum, para enviar o subgraph à L2. A "transferência" depreciará o subgraph na mainnet e enviará a informação para recriar o subgraph na L2 com o uso da bridge. Ele também incluirá o GRT sinalizado do dono do subgraph, que deve ser maior que zero para que a bridge aceite a transferência. +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -Ao escolher transferir o subgraph, isto converterá todo o sinal de curadoria do subgraph em GRT. Isto é equivalente à "depreciação" do subgraph na mainnet. O GRT correspondente à sua curadoria será enviado à L2 junto com o subgraph, onde ele será usado para mintar sinais em seu nome. +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -Outros Curadores podem escolher retirar a sua fração de GRT, ou também transferi-la à L2 para mintar sinais no mesmo subgraph. Se um dono de subgraph não transferir o seu subgraph à L2 e depreciá-lo manualmente através de uma chamada de contrato, os Curadores serão notificados, e poderão retirar a sua curadoria. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -Assim que o subgraph for transferido, como toda curadoria é convertida em GRT, Indexadores não receberão mais recompensas por indexar o subgraph. Porém, haverão Indexadores que 1) continuarão a servir subgraphs transferidos por 24 horas, e 2) começarão imediatamente a indexar o subgraph na L2. Como estes Indexadores já têm o subgraph indexado, não deve haver necessidade de esperar que o subgraph se sincronize, e será possível consultar o subgraph na L2 quase que imediatamente. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. 
-Queries no subgraph na L2 deverão ser feitas para uma URL diferente (or 'arbitrum-gateway.thegraph'), mas a URL na L1 continuará a trabalhar por no mínimo 48 horas. Após isto, o gateway na L1 encaminhará queries ao gateway na L2 (por um certo tempo), mas isto adicionará latência, então é recomendado trocar todas as suas queries para a nova URL o mais rápido possível. +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## Como escolher a sua carteira na L2 -Ao publicar o seu subgraph na mainnet, você usou uma carteira conectada para criar o subgraph, e esta carteira é dona do NFT que representa este subgraph e lhe permite publicar atualizações. +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -Ao transferir o subgraph ao Arbitrum, você pode escolher uma carteira diferente que será dona deste NFT de subgraph na L2. +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. Se você usar uma carteira "regular" como o MetaMask (uma Conta de Titularidade Externa, ou EOA, por ex. uma carteira que não é um contrato inteligente), então isto é opcional, e é recomendado manter o mesmo endereço titular que o da L1. -Se você usar uma carteira de contrato inteligente, como uma multisig (por ex. uma Safe), então escolher um endereço de carteira diferente na L2 é obrigatório, pois as chances são altas desta conta só existir na mainnet, e você não poderá fazer transações no Arbitrum enquanto usar esta carteira. Se quiser continuar a usar uma carteira de contrato inteligente ou multisig, crie uma nova carteira no Arbitrum e use o seu endereço lá como o dono do seu subgraph na L2. +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**É muito importante usar um endereço de carteira que você controle, e possa fazer transações no Arbitrum. Caso contrário, o subgraph será perdido e não poderá ser recuperado.** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## Preparações para a transferência: bridging de ETH -Transferir o subgraph envolve o envio de uma transação através da bridge, e depois, a execução de outra transação no Arbitrum. A primeira transação usa ETH na mainnet, e inclui um pouco de ETH para pagar por gas quando a mensagem for recebida na L2. Porém, se este gas for insuficiente, você deverá tentar executar a transação novamente e pagar o gas diretamente na L2 (este é o terceiro passo: "Confirmação da transação" abaixo). Este passo **deve ser executado até 7 dias depois do início da transação**. Além disto, a segunda transação ("4º passo: Finalização da transferência na L2") será feita diretamente no Arbitrum. 
Por estas razões, você precisará de um pouco de ETH em uma carteira Arbitrum. Se usar uma multisig ou uma conta de contrato inteligente, o ETH deverá estar na carteira regular (EOA) que você usar para executar as transações, e não na própria carteira multisig. +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. Você pode comprar ETH em algumas exchanges e retirá-la diretamente no Arbitrum, ou você pode usar a bridge do Arbitrum para enviar ETH de uma carteira na mainnet para a L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Como as taxas de gas no Arbitrum são menores, você só deve precisar de uma quantidade pequena. É recomendado começar em um limite baixo (por ex. 0.01 ETH) para que a sua transação seja aprovada. -## Como encontrar a Ferramenta de Transferência de Subgraphs +## Finding the Subgraph Transfer Tool -A Ferramenta de Transferência para L2 pode ser encontrada ao olhar a página do seu subgraph no Subgraph Studio: +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![ferramenta de transferência](/img/L2-transfer-tool1.png) -Ela também está disponível no Explorer se você se conectar com a carteira dona de um subgraph, e na página daquele subgraph no Explorer: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Transferência para L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ Clicar no botão Transfer to L2 (Transferir para L2) abrirá a ferramenta de tra ## 1º Passo: Como começar a transferência -Antes de começar a transferência, decida qual endereço será dono do subgraph na L2 (ver "Como escolher a sua carteira na L2" acima), e é altamente recomendado ter um pouco de ETH para o gas já em bridge no Arbitrum (ver "Preparações para a transferência: bridging de ETH" acima). +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommended to have some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). -Note também que transferir o subgraph exige ter uma quantidade de sinal no subgraph maior que zero, com a mesma conta dona do subgraph; se você não tiver sinalizado no subgraph, você deverá adicionar um pouco de curadoria (uma adição pequena, como 1 GRT, seria o suficiente). +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice).
-Após abrir a Ferramenta de Transferências, você poderá colocar o endereço da carteira na L2 no campo "Receiving wallet address" (endereço da carteira destinatária) - **certifique-se que inseriu o endereço correto**. Clicar em Transfer Subgraph (transferir subgraph) resultará em um pedido para executar a transação na sua carteira (note que um valor em ETH é incluído para pagar pelo gas na L2); isto iniciará a transferência e depreciará o seu subgraph na L1 (veja "Como entender o que acontece com o sinal, o seu subgraph na L1 e URLs de query" acima para mais detalhes sobre o que acontece nos bastidores). +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -Ao executar este passo, **garanta que executará o 3º passo em menos de 7 dias, ou o subgraph e o seu GRT de sinalização serão perdidos.** Isto se deve à maneira de como as mensagens L1-L2 funcionam no Arbitrum: mensagens enviadas através da bridge são "bilhetes de tentativas extras" que devem ser executadas dentro de 7 dias, e a execução inicial pode exigir outra tentativa se houver um surto no preço de gas no Arbitrum. +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Comece a transferência à L2](/img/startTransferL2.png) -## 2º Passo: A espera do caminho do subgraph até a L2 +## Step 2: Waiting for the Subgraph to get to L2 -Após iniciar a transferência, a mensagem que envia o seu subgraph da L1 para a L2 deve propagar pela bridge do Arbitrum. Isto leva cerca de 20 minutos (a bridge espera que o bloco da mainnet que contém a transação esteja "seguro" de reorganizações potenciais da chain). +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). Quando esta espera acabar, o Arbitrum tentará executar a transferência automaticamente nos contratos na L2. @@ -80,7 +80,7 @@ Quando esta espera acabar, o Arbitrum tentará executar a transferência automat ## 3º Passo: Como confirmar a transferência -Geralmente, este passo será executado automaticamente, já que o gas na L2 incluído no primeiro passo deverá ser suficiente para executar a transação que recebe o subgraph nos contratos do Arbitrum. Porém, em alguns casos, é possível que um surto nos preços de gas do Arbitrum faça com que esta execução automática falhe. Neste caso, o "bilhete" que envia o seu subgraph à L2 estará pendente e exigirá outra tentativa dentro de 7 dias. +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. 
In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. Se este for o caso, você deverá se conectar com uma carteira L2 que tenha um pouco de ETH no Arbitrum, trocar a rede da sua carteira para Arbitrum, e clicar em "Confirmar Transferência" para tentar a transação novamente. @@ -88,33 +88,33 @@ Se este for o caso, você deverá se conectar com uma carteira L2 que tenha um p ## 4º Passo: A finalização da transferência à L2 -Até aqui, o seu subgraph e GRT já foram recebidos no Arbitrum, mas o subgraph ainda não foi publicado. Você deverá se conectar com a carteira L2 que escolheu como a carteira destinatária, trocar a rede da carteira para Arbitrum, e clicar em "Publish Subgraph" (Publicar Subgraph). +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." -![Publicação do subgraph](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Espera para a publicação do subgraph](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -Isto publicará o subgraph de forma que Indexadores operantes no Arbitrum comecem a servi-lo. Ele também mintará sinais de curadoria com o GRT que foi transferido da L1. +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## 5º passo: Atualização da URL de query -Parabéns, o seu subgraph foi transferido ao Arbitrum com êxito! Para consultar o subgraph, a nova URL será: +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be: `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Note que a ID do subgraph no Arbitrum será diferente daquela que você tinha na mainnet, mas você pode sempre encontrá-la no Explorer ou no Studio. Como mencionado acima (ver "Como entender o que acontece com o sinal, o seu subgraph na L1 e URLs de query"), a URL antiga na L1 será apoiada por um período curto, mas você deve trocar as suas queries para o novo endereço assim que o subgraph for sincronizado na L2. +Note that the Subgraph ID on Arbitrum will be different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## Como transferir a sua curadoria ao Arbitrum (L2) -## Como entender o que acontece com a curadoria ao transferir um subgraph à L2 +## Understanding what happens to curation on Subgraph transfers to L2 -Quando o dono de um subgraph transfere um subgraph ao Arbitrum, todo o sinal do subgraph é convertido em GRT ao mesmo tempo. Isto se aplica a sinais "migrados automaticamente", por ex. sinais que não forem específicos a uma versão de um subgraph ou publicação, mas que segue a versão mais recente de um subgraph.
+When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph.

-Esta conversão do sinal ao GRT é a mesma que aconteceria se o dono de um subgraph depreciasse o subgraph na L1. Quando o subgraph é depreciado ou transferido, todo o sinal de curadoria é "queimado" em simultâneo (com o uso da bonding curve de curadoria) e o GRT resultante fica em posse do contrato inteligente GNS (sendo o contrato que cuida de atualizações de subgraph e sinais migrados automaticamente). Cada Curador naquele subgraph então tem um direito àquele GRT, proporcional à quantidade de ações que tinham no subgraph.
+This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph.

-Uma fração deste GRT correspondente ao dono do subgraph é enviado à L2 junto com o subgraph.
+A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph.

-Neste ponto, o GRT curado não acumulará mais taxas de query, então Curadores podem escolher sacar o seu GRT ou transferi-lo ao mesmo subgraph na L2, onde ele pode ser usado para mintar novos sinais de curadoria. Não há pressa para fazer isto, já que o GRT pode ser possuído por tempo indeterminado, e todos conseguem uma quantidade proporcional às suas ações, irrespectivo de quando a fizerem.
+At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be held indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it.

 ## Como escolher a sua carteira na L2

@@ -130,9 +130,9 @@ Se você usar uma carteira de contrato inteligente, como uma multisig (por ex. u

 Antes de iniciar a transferência, você deve decidir qual endereço será titular da curadoria na L2 (ver "Como escolher a sua carteira na L2" acima), e é recomendado ter um pouco de ETH para o gas já em bridge no Arbitrum, caso seja necessário tentar a execução da mensagem na L2 novamente. Você pode comprar ETH em algumas exchanges e retirá-lo diretamente no Arbitrum, ou você pode usar a bridge do Arbitrum para enviar ETH de uma carteira na mainnet à L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - como as taxas de gas no Arbitrum são menores, você só deve precisar de uma quantidade pequena; por ex. 0.01 ETH deve ser mais que o suficiente.

-Se um subgraph para o qual você cura já foi transferido para a L2, você verá uma mensagem no Explorer lhe dizendo que você curará para um subgraph transferido.
+If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph.

-Ao olhar a página do subgraph, você pode escolher retirar ou transferir a curadoria.
Clicar em "Transfer Signal to Arbitrum" (transferir sinal ao Arbitrum) abrirá a ferramenta de transferência. +When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. ![Transferir sinall](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ Se este for o caso, você deverá se conectar com uma carteira L2 que tenha um p ## Como retirar a sua curadoria na L1 -Se preferir não enviar o seu GRT à L2, ou preferir fazer um bridge do GRT de forma manual, você pode retirar o seu GRT curado na L1. No banner da página do subgraph, escolha "Withdraw Signal" (Retirar Sinal) e confirme a transação; o GRT será enviado ao seu endereço de Curador. +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. From 40ea219b74de8ee9ec92e32b52dd2408000504ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:55 -0500 Subject: [PATCH 0328/1789] New translations l2-transfer-tools-guide.mdx (Russian) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/ru/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/ru/archived/arbitrum/l2-transfer-tools-guide.mdx index 1dc689d934d3..b3509a9c7f8d 100644 --- a/website/src/pages/ru/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/ru/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ The Graph упростил переход на L2 в Arbitrum One. Для каж Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## Как перенести свой субграф в Arbitrum (L2) +## How to transfer your Subgraph to Arbitrum (L2) -## Преимущества переноса Ваших субграфов +## Benefits of transferring your Subgraphs Сообщество и разработчики ядра The Graph [готовились](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) к переходу на Arbitrum в течение прошлого года. Arbitrum, блокчейн уровня 2 или «L2», наследует безопасность от Ethereum, но обеспечивает значительно более низкую комиссию сети. -Когда Вы публикуете или обновляете свой субграф до The Graph Network, Вы взаимодействуете со смарт-контрактами по протоколу, и для этого требуется проплачивать комиссию сети с помощью ETH. После перемещения Ваших субграфов в Arbitrum, любые будущие обновления Вашего субграфа потребуют гораздо более низких сборов за комиссию сети. Более низкие сборы и тот факт, что кривые связи курирования на L2 ровные, также облегчают другим кураторам курирование Вашего субграфа, увеличивая вознаграждение для индексаторов в Вашем субграфе. Эта менее затратная среда также упрощает индексацию и обслуживание Вашего субграфа. В ближайшие месяцы вознаграждения за индексацию в Arbitrum будут увеличиваться, а в основной сети Ethereum уменьшаться, поэтому все больше и больше индексаторов будут переводить свои стейки и настраивать операции на L2. +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. 
By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. -## Понимание того, что происходит с сигналом, Вашим субграфом L1 и URL-адресами запроса +## Understanding what happens with signal, your L1 Subgraph and query URLs -Для передачи субграфа в Arbitrum используется мост Arbitrum GRT, который, в свою очередь, использует собственный мост Arbitrum для отправки субграфа на L2. «Перенос» отменяет поддержку субграфа в основной сети и отправляет информацию для повторного создания субграфа на L2 с использованием моста. Он также будет включать сигнал GRT владельца субграфа, который должен быть больше нуля, чтобы мост смог принять передачу. +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -Когда Вы решите передать субграф, весь сигнал курирования подграфа будет преобразован в GRT. Это эквивалентно «прекращению поддержки» субграфа в основной сети. GRT, соответствующие Вашему кураторству, будут отправлен на L2 вместе с субграфом, где они будут использоваться для производства сигнала от Вашего имени. +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -Другие Кураторы могут выбрать, вывести ли свою долю GRT или также перевести ее в L2 для производства сигнала на том же субграфе. Если владелец субграфа не перенесет свой субграф в L2 и вручную аннулирует его с помощью вызова контракта, то Кураторы будут уведомлены и смогут отозвать свое курирование. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -Индексаторы больше не будут получать вознаграждение за индексирование субграфа, как только субграф будет перенесён, так как всё курирование конвертируется в GRT. Однако будут индексаторы, которые 1) продолжат обслуживать переданные субграфы в течение 24 часов и 2) немедленно начнут индексировать субграф на L2. Поскольку эти индексаторы уже проиндексировали субграф, не нужно будет ждать синхронизации субграфа, и можно будет запросить субграф L2 практически сразу. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. 
However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. -Запросы к субграфу L2 необходимо будет выполнять по другому URL-адресу (на `arbitrum-gateway.thegraph.com`), но URL-адрес L1 будет продолжать работать в течение как минимум 48 часов. После этого шлюз L1 будет перенаправлять запросы на шлюз L2 (на некоторое время), но это увеличит задержку, поэтому рекомендуется как можно скорее переключить все Ваши запросы на новый URL-адрес. +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## Выбор Вашего кошелька L2 -Когда Вы опубликовали свой субграф в основной сети, Вы использовали подключенный кошелек для его создания, и этот кошелек обладает NFT, который представляет этот субграф и позволяет Вам публиковать обновления. +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -При переносе субграфа в Arbitrum Вы можете выбрать другой кошелек, которому будет принадлежать этот NFT субграфа на L2. +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. Если Вы используете «обычный» кошелек, такой как MetaMask (Externally Owned Account или EOA, то есть кошелек, который не является смарт-контрактом), тогда это необязательно, и рекомендуется сохранить тот же адрес владельца, что и в L1. -Если Вы используете смарт-контрактный кошелек, такой как кошелёк с мультиподписью (например, Safe), то выбор другого адреса кошелька L2 является обязательным, так как, скорее всего, эта учетная запись существует только в основной сети, и Вы не сможете совершать транзакции в сети Arbitrum с помощью этого кошелька. Если Вы хотите продолжать использовать кошелек смарт-контрактов или мультиподпись, создайте новый кошелек на Arbitrum и используйте его адрес в качестве владельца L2 Вашего субграфа. +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**Очень важно использовать адрес кошелька, которым Вы управляете и с которого можно совершать транзакции в Arbitrum. В противном случае субграф будет потерян и его невозможно будет восстановить.** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## Подготовка к переносу: использование моста с некоторым количеством ETH -Передача субграфа включает в себя отправку транзакции через мост, а затем выполнение другой транзакции в Arbitrum. 
Первая транзакция использует ETH в основной сети и включает некоторое количество ETH для оплаты комиссии сети при получении сообщения на уровне L2. Однако, если этого количества будет недостаточно, Вам придется повторить транзакцию и оплатить комиссию сети непосредственно на L2 (это «Шаг 3: Подтверждение перевода» ниже). Этот шаг **должен быть выполнен в течение 7 дней после начала переноса**. Более того, вторая транзакция («Шаг 4: Завершение перевода на L2») будет выполнена непосредственно на Arbitrum. В связи с этим Вам понадобится некоторое количество ETH на кошельке Arbitrum. Если Вы используете учетную запись с мультиподписью или смарт-контрактом, ETH должен находиться в обычном (EOA) кошельке, который Вы используете для выполнения транзакций, а не в самом кошельке с мультиподписью. +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. Вы можете приобрести ETH на некоторых биржах и вывести его напрямую на Arbitrum, или Вы можете использовать мост Arbitrum для отправки ETH из кошелька основной сети на L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Поскольку плата за комиссию сети в Arbitrum ниже, Вам понадобится лишь небольшая сумма. Рекомендуется начинать с низкого порога (например, 0,01 ETH), чтобы Ваша транзакция была одобрена. -## Поиск инструмента переноса субграфа +## Finding the Subgraph Transfer Tool -Вы можете найти инструмент переноса L2, когда просматриваете страницу своего субграфа в Subgraph Studio: +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![инструмент переноса](/img/L2-transfer-tool1.png) -Он также доступен в Explorer, если Вы подключены к кошельку, которому принадлежит субграф, и на странице этого субграфа в Explorer: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Перенос на L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ Some frequent questions about these tools are answered in the [L2 Transfer Tools ## Шаг 1: Запуск перевода -Прежде чем начать перенос, Вы должны решить, какому адресу будет принадлежать субграф на L2 (см. «Выбор кошелька L2» выше), также настоятельно рекомендуется иметь некоторое количество ETH для оплаты комиссии сети за соединение мостом с Arbitrum (см. «Подготовка к переносу: использование моста с некоторым количеством ETH" выше). +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). 
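Step 1 and the preparation section above recommend having a little ETH already bridged to Arbitrum for gas. A minimal sketch of checking the receiving wallet's balance beforehand, assuming ethers v6 and the public Arbitrum One RPC endpoint; the wallet address is a placeholder and the 0.01 ETH threshold simply mirrors the amount suggested above.

```typescript
// Hedged sketch: verify the receiving L2 wallet already holds a little ETH for gas
// before starting the transfer. Assumes ethers v6; the address is a placeholder and
// 0.01 ETH mirrors the amount suggested in this guide.
import { JsonRpcProvider, formatEther, parseEther } from "ethers";

const ARBITRUM_RPC = "https://arb1.arbitrum.io/rpc"; // public Arbitrum One endpoint
const RECEIVING_WALLET = "0xYourL2WalletAddress"; // placeholder

async function hasGasOnArbitrum(): Promise<boolean> {
  const provider = new JsonRpcProvider(ARBITRUM_RPC);
  const balance = await provider.getBalance(RECEIVING_WALLET);
  console.log(`Arbitrum balance: ${formatEther(balance)} ETH`);
  return balance >= parseEther("0.01");
}

hasGasOnArbitrum().catch(console.error);
```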
-Также обратите внимание, что для передачи субграфа требуется наличие ненулевого количества сигнала в субграфе с той же учетной записью, которая владеет субграфом; если Вы не просигнализировали на субграфе, Вам придется добавить немного монет для курирования (достаточно добавить небольшую сумму, например 1 GRT). +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -После открытия инструмента переноса Вы сможете ввести адрес кошелька L2 в поле «Адрес получающего кошелька» — **убедитесь, что Вы ввели здесь правильный адрес**. После нажатия на «Перевод субграфа», Вам будет предложено выполнить транзакцию в Вашем кошельке (обратите внимание, что некоторое количество ETH включено для оплаты газа L2); это инициирует передачу и отменит Ваш субграф на L1 (см. «Понимание того, что происходит с сигналом, Вашим субграфом L1 и URL-адресами запроса» выше для получения более подробной информации о том, что происходит за кулисами). +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -Если Вы выполните этот шаг, ** убедитесь в том, что Вы завершили шаг 3 менее чем за 7 дней, иначе субграф и Ваш сигнал GRT будут утеряны.** Это связано с тем, как в Arbitrum работает обмен сообщениями L1-L2: сообщения, которые отправляются через мост, представляют собой «билеты с возможностью повторной попытки», которые должны быть выполнены в течение 7 дней, и для первоначального исполнения может потребоваться повторная попытка, если в Arbitrum будут скачки цен комиссии сети. +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Запустите перенос на L2](/img/startTransferL2.png) -## Шаг 2: Ожидание перехода субграфа в L2 +## Step 2: Waiting for the Subgraph to get to L2 -После того, как Вы начнете передачу, сообщение, которое отправляет Ваш субграф с L1 в L2, должно пройти через мост Arbitrum. Это занимает примерно 20 минут (мост ожидает, пока блок основной сети, содержащий транзакцию, будет «защищен» от потенциальных реорганизаций чейна). +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). По истечении этого времени ожидания Arbitrum попытается автоматически выполнить перевод по контрактам L2. 
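The transfer message above is described as a retryable ticket that the bridge should auto-execute after roughly 20 minutes. A sketch of inspecting that cross-chain message, assuming the `@arbitrum/sdk` v3 API (`L1TransactionReceipt`, `getL1ToL2Messages`, `L1ToL2MessageStatus`) together with ethers v5 providers; the transaction hash and the L1 RPC URL are placeholders, and the Transfer Tool UI described in the guide remains the intended way to retry.

```typescript
// Hedged sketch: check whether the retryable ticket created by the transfer has been
// executed on L2. Assumes @arbitrum/sdk v3 with ethers v5 providers; the L1 RPC URL
// and transaction hash are placeholders.
import { providers } from "ethers";
import { L1TransactionReceipt, L1ToL2MessageStatus } from "@arbitrum/sdk";

const l1Provider = new providers.JsonRpcProvider("https://<your-mainnet-rpc>"); // placeholder
const l2Provider = new providers.JsonRpcProvider("https://arb1.arbitrum.io/rpc");

async function checkTicket(l1TxHash: string): Promise<void> {
  const receipt = await l1Provider.getTransactionReceipt(l1TxHash);
  const l1TxReceipt = new L1TransactionReceipt(receipt);
  const [message] = await l1TxReceipt.getL1ToL2Messages(l2Provider);
  const status = await message.status();
  if (status === L1ToL2MessageStatus.REDEEMED) {
    console.log("Ticket executed: the transfer message has arrived on L2.");
  } else {
    console.log(`Ticket not redeemed yet (status ${status}); retry from the Transfer Tool within 7 days.`);
  }
}

checkTicket("0x<l1-transfer-tx-hash>").catch(console.error); // placeholder hash
```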
@@ -80,7 +80,7 @@ Some frequent questions about these tools are answered in the [L2 Transfer Tools ## Шаг 3: Подтверждение переноса -В большинстве случаев этот шаг будет выполняться автоматически, поскольку комиссии сети L2, включенной в шаг 1, должно быть достаточно для выполнения транзакции, которая получает субграф в контрактах Arbitrum. Однако в некоторых случаях возможно, что скачок цен комиссии сети на Arbitrum приведёт к сбою этого автоматического выполнения. В этом случае «тикет», который отправляет ваш субграф на L2, будет находиться в ожидании и потребует повторной попытки в течение 7 дней. +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. В этом случае Вам нужно будет подключиться с помощью кошелька L2, в котором есть некоторое количество ETH в сети Arbitrum, переключить сеть Вашего кошелька на Arbitrum и нажать «Подтвердить перевод», чтобы повторить транзакцию. @@ -88,33 +88,33 @@ Some frequent questions about these tools are answered in the [L2 Transfer Tools ## Шаг 4: Завершение переноса в L2 -На данный момент Ваш субграф и GRT получены в Arbitrum, но субграф еще не опубликован. Вам нужно будет подключиться с помощью кошелька L2, который Вы выбрали в качестве принимающего кошелька, переключить сеть Вашего кошелька на Arbitrum и нажать «Опубликовать субграф». +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." -![Опубликуйте субграф](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Дождитесь публикации субграфа](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -Субграф будет опубликован, и индексаторы, работающие на Arbitrum, смогут начать его обслуживание. Он также будет создавать сигнал курирования, используя GRT, переданные из L1. +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## Шаг 5. Обновление URL-адреса запроса -Ваш субграф успешно перенесен в Arbitrum! Для запроса субграфа новый URL будет следующим: +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be : `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Обратите внимание, что идентификатор субграфа в Arbitrum будет отличаться от того, который был у Вас в основной сети, но Вы всегда можете найти его в Explorer или Studio. Как упоминалось выше (см. «Понимание того, что происходит с сигналом, Вашим субграфом L1 и URL-адресами запроса»), старый URL-адрес L1 будет поддерживаться в течение некоторого времени, но Вы должны переключить свои запросы на новый адрес, как только субграф будет синхронизирован в L2. +Note that the Subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. 
As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## Как перенести свой субграф в Arbitrum (L2) -## Понимание того, что происходит с курированием передачи субграфов на L2 +## Understanding what happens to curation on Subgraph transfers to L2 -Когда владелец субграфа передает субграф в Arbitrum, весь сигнал субграфа одновременно конвертируется в GRT. Это же относится и к "автоматически мигрировавшему" сигналу, т.е. сигналу, который не относится к конкретной версии или развертыванию субграфа, но который следует за последней версией субграфа. +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -Это преобразование сигнала в GRT аналогично тому, что произошло бы, если бы владелец субграфа объявил его устаревшим на L1. Когда субграф устаревает или переносится, в то же время «сжигается» весь сигнал курирования (с использованием кривой связывания курирования), а полученный GRT сохраняется в смарт-контракте GNS (то есть контракте, который обрабатывает обновления субграфа и сигнал автоматической миграции). Таким образом, каждый куратор этого субграфа имеет право на GRT, пропорционально количеству акций, которыми он владел в этом субграфе. +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph. -Часть этих GRT, принадлежащая владельцу субграфа, отправляется на L2 вместе с субграфом. +A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph. -На этом этапе курируемый GRT больше не будет начислять комиссии за запросы, поэтому кураторы могут выбрать: вывести свой GRT или перевести его на тот же субграф на L2, где его можно использовать для создания нового сигнала курирования. Спешить с этим не стоит, так как GRT может храниться неограниченное время, и каждый получит сумму пропорционально своим долям, независимо от того, когда это будет сделано. +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. ## Выбор Вашего кошелька L2 @@ -130,9 +130,9 @@ Some frequent questions about these tools are answered in the [L2 Transfer Tools Прежде чем начать перенос, Вы должны решить, какой адрес будет владеть курированием на L2 (см. "Выбор кошелька L2" выше), также рекомендуется уже иметь на Arbitrum некоторое количество ETH для газа на случай, если Вам потребуется повторно выполнить отправку сообщения на L2. 
Вы можете купить ETH на любых биржах и вывести его напрямую на Arbitrum, или использовать мост Arbitrum для отправки ETH из кошелька основной сети на L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) — поскольку комиссии за газ на Arbitrum очень низкие, Вам понадобится небольшая сумма, например, 0.01 ETH, этого, вероятно, будет более чем достаточно. -Если субграф, который Вы курируете, был перенесен на L2, Вы увидите сообщение в Explorer о том, что Вы курируете перенесённый субграф. +If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph. -При просмотре страницы субграфа Вы можете выбрать вывод или перенос курирования. Нажатие на кнопку "Перенести сигнал в Arbitrum", откроет инструмент переноса. +When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. ![Перенос сигнала](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ Some frequent questions about these tools are answered in the [L2 Transfer Tools ## Снятие Вашего курирования на L1 -Если Вы предпочитаете не отправлять свой GRT на L2 или хотите передать GRT вручную, Вы можете вывести свой курируемый GRT на L1. На баннере на странице субграфа выберите "Вывести сигнал" и подтвердите транзакцию; GRT будет отправлен на Ваш адрес Куратора. +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. From b5c789ab571ee5d62a4406f343a5511ecf2355e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:56 -0500 Subject: [PATCH 0329/1789] New translations l2-transfer-tools-guide.mdx (Swedish) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/sv/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/sv/archived/arbitrum/l2-transfer-tools-guide.mdx index 4dde699e5079..9cdb196e9c09 100644 --- a/website/src/pages/sv/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/sv/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ The Graph har gjort det enkelt att flytta till L2 på Arbitrum One. För varje p Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## Så här överför du din subgraf till Arbitrum (L2) +## How to transfer your Subgraph to Arbitrum (L2) -## Fördelar med att överföra dina subgrafer +## Benefits of transferring your Subgraphs The Graphs community och kärnutvecklare har [förberett sig](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) för att flytta till Arbitrum under det senaste året. Arbitrum, en blockkedja av lager 2 eller "L2", ärver säkerheten från Ethereum men ger drastiskt lägre gasavgifter. -När du publicerar eller uppgraderar din subgraf till The Graph Network, interagerar du med smarta kontrakt på protokollet och detta kräver att du betalar för gas med ETH. Genom att flytta dina subgrafer till Arbitrum kommer alla framtida uppdateringar av din subgraf att kräva mycket lägre gasavgifter. 
De lägre avgifterna, och det faktum att curation bonding-kurvorna på L2 är platta, gör det också lättare för andra curatorer att kurera på din subgraf, vilket ökar belöningarna för Indexers på din subgraf. Denna miljö med lägre kostnader gör det också billigare för indexerare att indexera och betjäna din subgraf. Indexeringsbelöningar kommer att öka på Arbitrum och minska på Ethereums mainnet under de kommande månaderna, så fler och fler indexerare kommer att överföra sin andel och sätta upp sin verksamhet på L2. +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. -## Förstå vad som händer med signal, din L1 subgraf och frågewebbadresser +## Understanding what happens with signal, your L1 Subgraph and query URLs -Att överföra en subgraf till Arbitrum använder Arbitrum GRT-bryggan, som i sin tur använder den inhemska Arbitrum-bryggan, för att skicka subgrafen till L2. "Överföringen" kommer att fasa ut subgrafen på mainnet och skicka informationen för att återskapa subgrafen på L2 med hjälp av bryggan. Den kommer också att inkludera subgrafägarens signalerade GRT, som måste vara mer än noll för att bryggan ska acceptera överföringen. +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -När du väljer att överföra subgrafen kommer detta att konvertera hela subgrafens kurationssignal till GRT. Detta motsvarar att "avskriva" subgrafen på mainnet. GRT som motsvarar din kuration kommer att skickas till L2 tillsammans med subgrafen, där de kommer att användas för att skapa signaler å dina vägnar. +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -Andra kuratorer kan välja om de vill ta tillbaka sin del av GRT eller också överföra den till L2 för att få en signal på samma subgraf. Om en subgrafägare inte överför sin subgraf till L2 och manuellt fasar ut den via ett kontraktsanrop, kommer Curatorer att meddelas och kommer att kunna dra tillbaka sin curation. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. 
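The hunk above explains that transferring a Subgraph converts all of its curation signal to GRT, with each Curator able to withdraw or transfer their fraction. A tiny sketch of the pro-rata split described in the curation section of this guide, using made-up figures; the actual conversion is performed by the curation bonding curve in the GNS contract, not by this arithmetic.

```typescript
// Hedged sketch of the pro-rata claim described in the guide: once all signal is
// burned to GRT, each Curator's share of that GRT is proportional to the curation
// shares they held. Figures below are illustrative only.
function curatorClaim(totalGrtFromBurn: number, curatorShares: number, totalShares: number): number {
  return (totalGrtFromBurn * curatorShares) / totalShares;
}

// Example: 10,000 GRT released by the burn, a Curator holding 50 of 400 shares.
console.log(curatorClaim(10_000, 50, 400)); // 1250 GRT claimable (withdraw on L1 or transfer to L2)
```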
-Så snart subgrafen har överförts, eftersom all kuration konverteras till GRT, kommer indexerare inte längre att få belöningar för att indexera subgrafen. Det kommer dock att finnas indexerare som kommer 1) att fortsätta visa överförda subgrafer i 24 timmar och 2) omedelbart börja indexera subgrafen på L2. Eftersom dessa indexerare redan har subgrafen indexerad, borde det inte finnas något behov av att vänta på att subgrafen ska synkroniseras, och det kommer att vara möjligt att fråga L2-subgrafen nästan omedelbart. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. -Förfrågningar till L2-subgrafen kommer att behöva göras till en annan URL (på `arbitrum-gateway.thegraph.com`), men L1-URL:n fortsätter att fungera i minst 48 timmar. Efter det kommer L1-gatewayen att vidarebefordra frågor till L2-gatewayen (under en tid), men detta kommer att lägga till latens så det rekommenderas att byta alla dina frågor till den nya URL:en så snart som möjligt. +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## Välja din L2 plånbok -När du publicerade din subgraf på mainnet använde du en ansluten plånbok för att skapa subgrafen, och denna plånbok äger NFT som representerar denna subgraf och låter dig publicera uppdateringar. +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -När du överför subgrafen till Arbitrum kan du välja en annan plånbok som kommer att äga denna subgraf NFT på L2. +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. Om du använder en "vanlig" plånbok som MetaMask (ett externt ägt konto eller EOA, d.v.s. en plånbok som inte är ett smart kontrakt), så är detta valfritt och det rekommenderas att behålla samma ägaradress som i L1. -Om du använder en smart kontraktsplånbok, som en multisig (t.ex. ett kassaskåp), är det obligatoriskt att välja en annan L2-plånboksadress, eftersom det är mest troligt att det här kontot bara finns på mainnet och att du inte kommer att kunna göra transaktioner på Arbitrum med denna plånbok. Om du vill fortsätta använda en smart kontraktsplånbok eller multisig, skapa en ny plånbok på Arbitrum och använd dess adress som L2-ägare till din subgraf. +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. 
-**Det är mycket viktigt att använda en plånboksadress som du kontrollerar, och som kan göra transaktioner på Arbitrum. Annars kommer subgrafen att gå förlorad och kan inte återställas.** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## Förbereder för överföringen: överbrygga lite ETH -Att överföra subgrafen innebär att man skickar en transaktion genom bryggan och sedan utför en annan transaktion på Arbitrum. Den första transaktionen använder ETH på huvudnätet och inkluderar en del ETH för att betala för gas när meddelandet tas emot på L2. Men om denna gas är otillräcklig måste du göra om transaktionen och betala för gasen direkt på L2 (detta är "Steg 3: Bekräfta överföringen" nedan). Detta steg **måste utföras inom 7 dagar efter att överföringen påbörjats**. Dessutom kommer den andra transaktionen ("Steg 4: Avsluta överföringen på L2") att göras direkt på Arbitrum. Av dessa skäl behöver du lite ETH på en Arbitrum-plånbok. Om du använder ett multisig- eller smart kontraktskonto måste ETH: en finnas i den vanliga (EOA) plånboken som du använder för att utföra transaktionerna, inte på själva multisig plånboken. +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. Du kan köpa ETH på vissa börser och ta ut den direkt till Arbitrum, eller så kan du använda Arbitrum-bryggan för att skicka ETH från en mainnet-plånbok till L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Eftersom gasavgifterna på Arbitrum är lägre bör du bara behöva en liten summa. Det rekommenderas att du börjar vid en låg tröskel (0.t.ex. 01 ETH) för att din transaktion ska godkännas. -## Hitta subgrafen Överföringsverktyg +## Finding the Subgraph Transfer Tool -Du kan hitta L2 Överföringsverktyg när du tittar på din subgrafs sida på Subgraf Studio: +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![Överföringsverktyg](/img/L2-transfer-tool1.png) -Den är också tillgänglig på Explorer om du är ansluten till plånboken som äger en subgraf och på den subgrafens sida på Explorer: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Överför till L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ Genom att klicka på knappen Överför till L2 öppnas överföringsverktyget d ## Steg 1: Starta överföringen -Innan du påbörjar överföringen måste du bestämma vilken adress som ska äga subgrafen på L2 (se "Välja din L2 plånbok" ovan), och det rekommenderas starkt att ha lite ETH för gas som redan är överbryggad på Arbitrum (se "Förbereda för överföringen: brygga" lite ETH" ovan). 
+Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). -Observera också att överföring av subgrafen kräver att en signal som inte är noll på subgrafen med samma konto som äger subgrafen; om du inte har signalerat på subgrafen måste du lägga till lite curation (att lägga till en liten mängd som 1 GRT skulle räcka). +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -Efter att ha öppnat överföringsverktyget kommer du att kunna ange L2-plånboksadressen i fältet "Mottagande plånboksadress" - **se till att du har angett rätt adress här**. Om du klickar på Transfer Subgraph kommer du att uppmana dig att utföra transaktionen på din plånbok (observera att ett ETH-värde ingår för att betala för L2-gas); detta kommer att initiera överföringen och fasa ut din L1-subgraf (se "Förstå vad som händer med signal, din L1-subgraf och sökadresser" ovan för mer information om vad som händer bakom kulisserna). +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -Om du utför det här steget, **se till att du fortsätter tills du har slutfört steg 3 om mindre än 7 dagar, annars försvinner subgrafen och din signal-GRT.** Detta beror på hur L1-L2-meddelanden fungerar på Arbitrum: meddelanden som skickas genom bryggan är "omförsökbara biljetter" som måste utföras inom 7 dagar, och det första utförandet kan behöva ett nytt försök om det finns toppar i gaspriset på Arbitrum. +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Start the transfer to L2](/img/startTransferL2.png) -## Steg 2: Väntar på att subgrafen ska komma till L2 +## Step 2: Waiting for the Subgraph to get to L2 -När du har startat överföringen måste meddelandet som skickar din L1 subgraf till L2 spridas genom Arbitrum bryggan. Detta tar cirka 20 minuter (bryggan väntar på att huvudnäts blocket som innehåller transaktionen är "säkert" från potentiella kedjereorganisationer). +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). När denna väntetid är över kommer Arbitrum att försöka utföra överföringen automatiskt på L2 kontrakten. 
@@ -80,7 +80,7 @@ När denna väntetid är över kommer Arbitrum att försöka utföra överförin ## Steg 3: Bekräfta överföringen -I de flesta fall kommer detta steg att utföras automatiskt eftersom L2-gasen som ingår i steg 1 borde vara tillräcklig för att utföra transaktionen som tar emot subgrafen på Arbitrum-kontrakten. I vissa fall är det dock möjligt att en topp i gaspriserna på Arbitrum gör att denna autoexekvering misslyckas. I det här fallet kommer "biljetten" som skickar din subgraf till L2 att vara vilande och kräver ett nytt försök inom 7 dagar. +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. Om så är fallet måste du ansluta med en L2 plånbok som har lite ETH på Arbitrum, byta ditt plånboksnätverk till Arbitrum och klicka på "Bekräfta överföring" för att försöka genomföra transaktionen igen. @@ -88,33 +88,33 @@ Om så är fallet måste du ansluta med en L2 plånbok som har lite ETH på Arbi ## Steg 4: Avsluta överföringen på L2 -Vid det här laget har din subgraf och GRT tagits emot på Arbitrum, men subgrafen är inte publicerad ännu. Du måste ansluta med L2 plånboken som du valde som mottagande plånbok, byta ditt plånboksnätverk till Arbitrum och klicka på "Publicera subgraf" +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." -![Publicera subgrafen](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Vänta på att subgrafen ska publiceras](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -Detta kommer att publicera subgrafen så att indexerare som är verksamma på Arbitrum kan börja servera den. Det kommer också att skapa kurations signaler med hjälp av GRT som överfördes från L1. +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## Steg 5: Uppdatera sökfrågans URL -Din subgraf har överförts till Arbitrum! För att fråga subgrafen kommer den nya webbadressen att vara: +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be : `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Observera att subgraf-ID: t på Arbitrum kommer att vara ett annat än det du hade på mainnet, men du kan alltid hitta det på Explorer eller Studio. Som nämnts ovan (se "Förstå vad som händer med signal, dina L1-subgraf- och sökwebbadresser") kommer den gamla L1-URL: n att stödjas under en kort stund, men du bör byta dina frågor till den nya adressen så snart subgrafen har synkroniserats på L2. +Note that the Subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. 
As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## Så här överför du din kuration till Arbitrum (L2) -## Förstå vad som händer med curation vid subgraf överföringar till L2 +## Understanding what happens to curation on Subgraph transfers to L2 -När ägaren av en subgraf överför en subgraf till Arbitrum, omvandlas all subgrafs signal till GRT samtidigt. Detta gäller för "auto-migrerad" signal, det vill säga signal som inte är specifik för en subgraf version eller utbyggnad men som följer den senaste versionen av en subgraf. +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -Denna omvandling från signal till GRT är densamma som vad som skulle hända om subgrafägaren avskaffade subgrafen i L1. När subgrafen föråldras eller överförs, "bränns" all curation-signal samtidigt (med hjälp av curation bonding-kurvan) och den resulterande GRT hålls av GNS smarta kontraktet (det är kontraktet som hanterar subgrafuppgraderingar och automatisk migrerad signal). Varje kurator i det stycket har därför ett anspråk på den GRT som är proportionell mot antalet aktier de hade för stycket. +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph. -En bråkdel av dessa BRT som motsvarar subgrafägaren skickas till L2 tillsammans med subgrafen. +A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph. -Vid denna tidpunkt kommer den kurerade BRT inte att samla på sig några fler frågeavgifter, så kuratorer kan välja att dra tillbaka sin BRT eller överföra den till samma subgraf på L2, där den kan användas för att skapa en ny kurationssignal. Det är ingen brådska att göra detta eftersom BRT kan hjälpa till på obestämd tid och alla får ett belopp som är proportionellt mot sina aktier, oavsett när de gör det. +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. ## Välja din L2 plånbok @@ -130,9 +130,9 @@ Om du använder en smart kontraktsplånbok, som en multisig (t.ex. ett kassaskå Innan du påbörjar överföringen måste du bestämma vilken adress som ska äga kurationen på L2 (se "Välja din L2-plånbok" ovan), och det rekommenderas att ha en del ETH för gas som redan är överbryggad på Arbitrum ifall du behöver försöka utföra exekveringen av meddelande på L2. 
Du kan köpa ETH på vissa börser och ta ut den direkt till Arbitrum, eller så kan du använda Arbitrum-bryggan för att skicka ETH från en mainnet-plånbok till L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - eftersom gasavgifterna på Arbitrum är så låga ska du bara behöva en liten summa, t.ex. 0,01 ETH kommer förmodligen att vara mer än tillräckligt. -Om en subgraf som du kurerar till har överförts till L2 kommer du att se ett meddelande i Explorer som talar om att du kurerar till en överförd subgraf. +If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph. -När du tittar på subgraf sidan kan du välja att dra tillbaka eller överföra kurationen. Genom att klicka på "Överför signal till Arbitrum" öppnas överföringsverktyget. +When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. ![Överföringssignal](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ Om så är fallet måste du ansluta med en L2 plånbok som har lite ETH på Arbi ## Dra tillbaka din kuration på L1 -Om du föredrar att inte skicka din GRT till L2, eller om du hellre vill överbrygga GRT manuellt, kan du ta tillbaka din kurerade BRT på L1. På bannern på subgraf sidan väljer du "Ta tillbaka signal" och bekräftar transaktionen; GRT kommer att skickas till din kurator adress. +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. From 5e7cb4488c59aadaf1ff15ec857c444530a716e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:57 -0500 Subject: [PATCH 0330/1789] New translations l2-transfer-tools-guide.mdx (Turkish) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/tr/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/tr/archived/arbitrum/l2-transfer-tools-guide.mdx index 15b3bfb1004e..949f7e1ca425 100644 --- a/website/src/pages/tr/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/tr/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ Graph, Arbitrum One üzerinde Katman2'ye geçişi kolaylaştırmıştır. Her pr Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## Subgraph'ınızı Arbitrum'a nasıl transfer edebilirsiniz (Katman2) +## How to transfer your Subgraph to Arbitrum (L2) -## Subgraphlar'ınızı transfer etmenin faydaları +## Benefits of transferring your Subgraphs Graph topluluğu ve çekirdek geliştiricileri geçtiğimiz yıl boyunca Arbitrum'a geçmek için [hazırlanıyordu] (https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305). Bir katman 2 veya "L2" blok zinciri olan Arbitrum, güvenliği Ethereum'dan devralmakla birlikte büyük ölçüde daha düşük gaz ücretleri sağlamaktadır. -Subgraph'ınızı Graph Ağı'nda yayınladığınızda veya yükselttiğinizde, protokol üzerindeki akıllı sözleşmelerle etkileşime girersiniz ve bu ETH kullanarak gas ödemesi yapmayı gerektirir. 
Subgraphlar'ınızı Arbitrum'a taşıdığınızda, gelecekte subgraphlar'ınızda yapılacak tüm güncellemeler çok daha düşük gas ücretleri gerektirecektir. Daha düşük ücretler ve Katman2'deki kürasyon bağlanma eğrilerinin sabit olması, diğer Küratörlerin subgraph'ınızda kürasyon yapmasını kolaylaştırır ve subgraph'ınızdaki İndeksleyiciler için ödülleri artırır. Bu düşük maliyetli ortam, İndeksleyicilerin subgraph'ınızı indekslemesini ve hizmet vermesini de daha ucuz hale getirmektedir.. Önümüzdeki aylarda İndeksleme ödülleri Arbitrum'da artacak ve Ethereum ana ağında azalacaktır, bu nedenle gittikçe daha fazla İndeksleyici mevcut stake'lerini transfer edecek ve operasyonlarını Katman2'de başlatacaktır. +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. -## Sinyal, Katman1 subgraph'ınız ve sorgu URL'leri ile neler gerçekleştiğini anlama +## Understanding what happens with signal, your L1 Subgraph and query URLs -Bir subgraph'ı Arbitrum'a transfer etmek için Arbitrum GRT köprüsü kullanılmaktadır, bu köprüde subgraph'ı Katman2'ye göndermek için yerel Arbitrum köprüsünü kullanır. "transfer", ana ağdaki subgraph'ı kullanımdan kaldıracak ve köprüyü kullanarak Katman2'de subgraph'ı yeniden oluşturmak için bilgi gönderecektir. Aynı zamanda, köprünün transferi kabul etmesi için subgraph sahibinin sinyallenmiş GRT'sini de dahil edecektir ve bu değer sıfırdan büyük olmalıdır. +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -Subgraph transfer etmeyi seçtiğinizde, bu, subgraph'ın tüm kürasyon sinyalini GRT'ye dönüştürecektir. Bu, ana ağdaki subgraph'ı "kullanımdan kaldırmaya" eşdeğerdir. Kürasyonunuza karşılık gelen GRT, subgraphla birlikte Katman2'ye gönderilecek ve burada sizin adınıza sinyal basmak için kullanılacaktır. +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -Diğer Küratörler, GRT tokenlerinin bir bölümünü geri çekmeyi ya da aynı subgraph üzerinde sinyal basmak için Katman2'ye transfer etmeyi tercih edebilirler. Bir subgraph sahibi subgraph'ını Katman2'ye transfer edemezse ve bir sözleşme çağrısı yoluyla manuel olarak kullanımdan kaldırırsa, Küratörler bilgilendirilecek ve kürasyonlarını geri çekebileceklerdir. 
+Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -Subgraph transfer edilir edilmez, tüm kürasyon GRT'ye dönüştürüldüğünden, İndeksleyiciler artık subgraph'ı indekslemek için ödül almayacaktır. Ancak, 1) aktarılan subgraphlar'ı 24 saat boyunca sunmaya devam edecek ve 2) hemen Katman2'de subgraph'ı indekslemeye başlayacak İndeksleyiciler olacaktır. Bu İndeksleyiciler subgraph'ı zaten indekslediğinden, subgraph'ın senkronize olmasını beklemeye gerek kalmayacak ve Katman2 subgraph'ını neredeyse anında sorgulamak mümkün olacaktır. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. -Katman2 subgraph'ına yönelik sorgular farklı bir URL üzerinden yapılmalıdır (arbitrum-gateway.thegraph.com). Ancak Katman1 URL'si en az 48 saat boyunca çalışmaya devam edecektir. Bu sürenin ardından, Katman1 ağ geçidi sorguları (bir süre için) Katman2 ağ geçidine iletecektir, fakat bu gecikmeye neden olacağından ötürü mümkün olan en kısa sürede tüm sorgularınızı yeni URL'ye geçirmeniz önerilir. +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## Katman2 cüzdanınızın seçimi -Subgraph'ınızı ana ağ üzerinde yayınladığınızda, subgraph'ı oluşturmak için bağlı bir cüzdan kullandınız ve bu cüzdan, bu subgraph'ı temsil eden ve güncellemeleri yayınlamanıza izin veren NFT'nin sahibidir. +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -Subgraph'ı Arbitrum'a transfer ederken, Katman2 üzerinde bu subgraph NFT'ye sahip olacak farklı bir cüzdan seçebilirsiniz. +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. MetaMask gibi "genel" bir cüzdan (Harici Olarak Sahip Olunan Hesap veya EOA, yani akıllı sözleşme olmayan bir cüzdan) kullanıyorsanız, bu opsiyoneldir ve Katman1'deki ile aynı sahip adresini kullanmanız önerilir. -Çoklu imza (örneğin Safe) gibi bir akıllı sözleşme cüzdanı kullanıyorsanız, farklı bir Katman2 cüzdan adresi seçmek zorunludur, çünkü büyük olasılıkla bu hesap yalnızca ana ağ üzerinde kullanılabilir ve bu cüzdanı kullanarak Arbitrum'da işlem yapamazsınız. Bir akıllı sözleşme cüzdanı veya çoklu imza cüzdanı kullanmaya devam etmek istiyorsanız, Arbitrum'da yeni bir cüzdan oluşturun ve adresini subgraph'ınızın Katman2 sahibi olarak kullanın. +If you're using a smart contract wallet, like a multisig (e.g. 
a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**Sizin kontrolünüzde ve Arbitrum üzerinde işlem yapabilen bir cüzdan adresi kullanmak oldukça önemlidir. Aksi takdirde, subgraph kaybolacak ve kurtarılamayacaktır.** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## Transfer için hazırlık: Bir miktar ETH köprüleme -Subgraph'ın transfer edilmesi, köprü üzerinden bir işlemin gönderilmesini ve ardından Arbitrum'da başka bir işlemin yürütülmesini içermektedir. İlk işlem ana ağda ETH kullanır ve mesaj Katman2'de alındığında gas için ödeme yapmak üzere bir miktar ETH içerir. Ancak, bu gas yetersizse, işlemi yeniden denemeniz ve gas için doğrudan Katman2'de ödeme yapmanız gerekecektir (bu, aşağıdaki "Adım 3: Transferi onaylama" dır). Bu adım **transferin başlamasından sonraki 7 gün içinde gerçekleştirilmelidir**. Ayrıca, ikinci işlem ("Adım 4: Katman2'de transferin tamamlanması") doğrudan Arbitrum'da gerçekleştirilecektir. Bu nedenlerden dolayı, Arbitrum cüzdanında bir miktar ETH'ye ihtiyacınız olacak. Bir çoklu imzalı veya akıllı sözleşme hesabı kullanıyorsanız, ETH'nin çoklu imza değil, işlemleri gerçekleştirmek için kullandığınız normal harici hesap (EOA) cüzdanında olması gerekecektir. +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. Bazı borsalardan ETH satın alabilir ve doğrudan Arbitrum'a çekebilir veya bir ana ağ cüzdanından Katman2'ye ETH göndermek için Arbitrum köprüsünü kullanabilirsiniz: [bridge.arbitrum.io](http://bridge.arbitrum.io). Arbitrum'daki gas ücretleri daha düşük olduğundan, yalnızca küçük bir miktara ihtiyacınız olacaktır. İşleminizin onaylanması için düşük bir eşikten (ör. 0.01 ETH) başlamanız önerilir. 
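As a quick sanity check alongside the bridging guidance above, the receiving wallet's ETH balance on Arbitrum can be read with the standard `eth_getBalance` JSON-RPC call. This is a minimal sketch only; the public RPC endpoint, the placeholder address, and the 0.01 ETH threshold are illustrative assumptions rather than values required by the guide.

```typescript
// Minimal sketch: confirm the L2 wallet already holds a little ETH for gas before starting a transfer.
// The RPC endpoint, wallet address, and threshold below are illustrative assumptions.
const ARBITRUM_RPC = "https://arb1.arbitrum.io/rpc"; // any Arbitrum One RPC endpoint should work
const L2_WALLET = "0x0000000000000000000000000000000000000001"; // placeholder: your L2 wallet
const MIN_ETH = 0.01; // small buffer, in line with the amounts suggested above

async function hasGasOnArbitrum(): Promise<boolean> {
  const res = await fetch(ARBITRUM_RPC, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      jsonrpc: "2.0",
      id: 1,
      method: "eth_getBalance", // standard JSON-RPC call; returns the balance in wei as a hex string
      params: [L2_WALLET, "latest"],
    }),
  });
  const { result } = await res.json();
  const eth = Number(BigInt(result)) / 1e18;
  console.log(`ETH on Arbitrum: ${eth}`);
  return eth >= MIN_ETH;
}

hasGasOnArbitrum().then((ok) => console.log(ok ? "Enough gas to proceed" : "Bridge a bit more ETH first"));
```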
-## Subgraph Transfer Aracını bulma +## Finding the Subgraph Transfer Tool -Subgraph Stüdyo'da subgraph'ınızın sayfasına bakarak Katman2 Transfer Aracını bulabilirsiniz: +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![transfer tool](/img/L2-transfer-tool1.png) -Ayrıca, bir subgraph'ın sahibi olan cüzdana bağlıysanız Gezgin'de ve Gezgin'deki subgraph'ın sayfasında da bulunmaktadır: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Transferring to L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ Katman2'ye Transfer düğmesine tıkladığınızda transfer işlemini başlatab ## Adım 1: Transferin başlatılması -Transfere başlamadan önce, Katman2'de hangi adresin subgraph'a sahip olacağına karar vermelisiniz (yukarıdaki "Katman2 cüzdanınızın seçimi" bölümüne bakın) ve Arbitrum'da halihazırda köprülenmiş gas için kullanacağınız bir miktar ETH bulundurmanız şiddetle tavsiye edilir (yukarıdaki "Transfer için hazırlık: Bir miktar ETH köprüleme" bölümüne bakın). +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). -Ayrıca, subgraph'ın sahibi olan hesabın bir subgraph transferi gerçekleştirebilmesi için ilgili subgraph üzerinde belirli bir sinyale sahip olması gerektiğini göz önünde bulundurun; eğer subgraph üzerinde sinyal vermediyseniz, biraz kürasyon eklemeniz gerekecektir (1 GRT gibi küçük bir miktar eklemek yeterli olacaktır). +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -Transfer Aracını açtıktan sonra, Katman2 cüzdan adresini "Alıcı cüzdan adresi" alanına girebileceksiniz - **buraya doğru adresi girdiğinizden emin olun**. Subgraph'ı Transfer Et'e tıkladığınızda, cüzdanınızda işlemi gerçekleştirmeniz istenecektir (Katman2 gas'ı için ödeme yapmak üzere bir miktar ETH'nin dahil edildiğini unutmayın); bu, transferi başlatacak ve Katman1 subgraph'ınızı kullanımdan kaldıracaktır (perde arkasında neler olup bittiğine ilişkin daha fazla ayrıntı için yukarıdaki "Sinyal, Katman1 subgraph'ınız ve sorgu URL'leri ile neler gerçekleştiğini anlama" bölümüne bakın). +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -Bu adımı uygularsanız, **3. adımı tamamlamak için yedi günden daha kısa bir sürede ilerlediğinizden mutlaka emin olmalısınız; aksi halde subgraph ve sinyal GRT'nizi kaybedeceksiniz.** Bunun nedeni Arbitrum'da Katman1-Katman2 mesajlaşmasının çalışma şeklidir: köprü üzerinden gönderilen mesajlar 7 gün içinde yürütülmesi gereken "yeniden denenebilir biletler"dir ve Arbitrum'da gas fiyatında ani artışlar olması durumunda ilk yürütmenin yeniden denenmesi gerekebilir. 
+If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![L2’ye transferi başlatın](/img/startTransferL2.png) -## Adım 2: Subgraph'ın Katman2'ye ulaşmasını bekleme +## Step 2: Waiting for the Subgraph to get to L2 -Transferi başlattıktan sonra, Katman1 subgraph'ınızı Katman2'ye gönderen mesajın Arbitrum köprüsü üzerinden yayılması gerekir. Bu işlem yaklaşık 20 dakika sürer (köprü, işlemi içeren ana ağ bloğunun olası zincir yeniden düzenlemelerine karşı "güvenli" olmasını bekler). +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). Bu bekleme süresi sona erdiğinde Arbitrum, Katman2 sözleşmelerinde transferi otomatik olarak yürütmeye çalışacaktır. @@ -80,7 +80,7 @@ Bu bekleme süresi sona erdiğinde Arbitrum, Katman2 sözleşmelerinde transferi ## Adım 3: Transferi onaylama -Çoğu durumda, bu adım otomatik olarak yürütülecektir çünkü 1. adımda yer alan Katman2 gas'ı Arbitrum sözleşmelerinde subgraph'ı içeren işlemi yürütmek için yeterli olacaktır. Ancak bazı durumlarda, Arbitrum'daki gas fiyatlarındaki bir artış bu otomatik yürütmenin başarısız olmasına neden olabilir. Bu durumda, subgraph'ınızı Katman2'ye gönderen "bilet" beklemede olacak ve 7 gün içinde yeniden denenmesi gerekecektir. +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. Durum buysa, Arbitrum'da bir miktar ETH bulunan bir Katman2 cüzdanı bağlanmanız, cüzdan ağınızı Arbitrum'a geçirmeniz ve işlemi yeniden denemek için "Transferi Onayla" seçeneğine tıklamanız gerekecektir. @@ -88,33 +88,33 @@ Durum buysa, Arbitrum'da bir miktar ETH bulunan bir Katman2 cüzdanı bağlanman ## Adım 4: Katman2'de transferin tamamlanması -Bu noktada, subgraph'ınız ve GRT'niz Arbitrum'a ulaşmıştır, ancak subgraph henüz yayınlanmamıştır. Alıcı cüzdan olarak seçtiğiniz Katman2 cüzdanını bağlanmanız, cüzdan ağınızı Arbitrum'a geçirmeniz ve "Subgraph'ı Yayınla" seçeneğine tıklamanız gerekecektir. +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." -![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -Bu, Arbitrum üzerinde çalışan İndeksleyicilerin hizmet vermeye başlayabilmesi için subgraph'ı yayınlayacaktır. Ayrıca Katman1'den aktarılan GRT'yi kullanarak kürasyon sinyalini de basacaktır. 
+This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## Adım 5: Sorgu URL'sini güncelleme -Subgraph'ınız Arbitrum'a başarıyla transfer edildi! Subgraph'ı sorgulamak için yeni URL şu şekilde olacaktır: +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be : `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Arbitrum'daki subgraph kimliğinin ana ağda sahip olduğunuzdan farklı olacağını unutmayın, ancak bunu her zaman Gezgin veya Stüdyo aracılığıyla bulabilirsiniz. Yukarıda belirtildiği gibi ("Sinyal, Katman1 subgraph'ınız ve sorgu URL'leri ile neler gerçekleştiğini anlama" bölümüne bakın) eski Katman1 URL'si kısa bir süre için desteklenecektir, ancak subgraph Katman2'de senkronize edilir edilmez sorgularınızı yeni adrese geçirmelisiniz. +Note that the Subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## Kürasyonunuzu Arbitrum'a nasıl transfer edebilirsiniz (Katman2) -## Katman2'ye subgraph transferlerinde kürasyona ne olduğunu anlama +## Understanding what happens to curation on Subgraph transfers to L2 -Bir subgraph'ın sahibi subgraph'ı Arbitrum'a transfer ettiğinde, subgrpah'ın tüm sinyali aynı anda GRT'ye dönüştürülür. Bu, "otomatik olarak taşınan" sinyal, yani bir subgraph sürümüne veya dağıtımına özgü olmayan ancak bir subgraph'ın en son sürümünü takip eden sinyal için geçerlidir. +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -Sinyalden GRT'ye bu dönüşüm, subgraph sahibinin subgraph'ı Katman1'de kullanımdan kaldırması durumunda gerçekleşecek olanla aynıdır. Subgraph kullanımdan kaldırıldığında veya transfer edildiğinde, tüm kürasyon sinyali aynı anda "yakılır" (kürasyon bağlanma eğrisi kullanılarak) ve ortaya çıkan GRT, GNS akıllı sözleşmesi (yani subgraph yükseltmelerini ve otomatik olarak taşınan sinyali işleyen sözleşme) tarafından tutulur. Bu nedenle, bu subgraph'daki her Küratör, subgraph için sahip oldukları stake miktarıyla orantılı olarak GRT üzerinde hak iddia eder. +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph. -Bu GRT tokenlerin subgraph sahibine ilişkin bir bölümü, subgraph ile birlikte Katman2'ye iletilir. +A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph. 
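For the query-URL step above, the snippet below is a minimal sketch of pointing a client at the new Arbitrum gateway endpoint. The bracketed API key and Subgraph ID are the guide's own placeholders, and the `_meta` sync check assumes the deployment exposes the standard subgraph metadata field.

```typescript
// Minimal sketch: query the transferred Subgraph through the new L2 gateway URL.
// Replace the bracketed placeholders with your own API key and L2 Subgraph ID.
const L2_QUERY_URL =
  "https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]";

async function querySubgraph<T>(query: string): Promise<T> {
  const res = await fetch(L2_QUERY_URL, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query }),
  });
  const { data, errors } = await res.json();
  if (errors) throw new Error(JSON.stringify(errors));
  return data as T;
}

// `_meta` is assumed to be available (it is part of the standard subgraph schema);
// checking the latest indexed block is a simple way to confirm the L2 deployment is syncing.
querySubgraph<{ _meta: { block: { number: number } } }>("{ _meta { block { number } } }").then(
  (data) => console.log("L2 deployment indexed up to block", data._meta.block.number),
);
```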
-Bu noktada, küratörlüğü yapılan GRT daha fazla sorgu ücreti biriktirmeyecektir, bu nedenle Küratörler GRT'lerini geri çekmeyi veya yeni kürasyon sinyali basmak için kullanılabilecekleri Katman2'deki aynı subgraph'a transfer etmeyi seçebilirler. GRT süresiz bir şekilde kullanılabileceğinden ve ne zaman yaptıklarına bakılmaksızın herkes paylarıyla orantılı bir miktar alacağından bunu yapmak için acele etmeye gerek yoktur. +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. ## Katman2 cüzdanınızın seçimi @@ -130,9 +130,9 @@ Metamask gibi "genel" bir cüzdan (Harici Olarak Sahip Olunan Hesap veya EOA, ya Transfere başlamadan önce, Katman2'deki kürasyonun hangi adrese ait olacağına karar vermelisiniz (yukarıdaki "Katman2 cüzdanınızın seçinmi" bölümüne bakın) ve mesajın Katman2'de yürütülmesini yeniden denemeniz gerektiğinde Arbitrum'da zaten köprülenmiş gas için kullanabileceğiniz bir miktar ETH bulundurmanız önerilir. Bazı borsalardan ETH satın alabilir ve doğrudan Arbitrum'a çekebilir veya bir ana ağ cüzdanından Katman2'ye ETH göndermek için Arbitrum köprüsünü kullanabilirsiniz: [bridge.arbitrum.io](http://bridge.arbitrum.io) - Arbitrum'daki gas ücretleri çok düşük olduğundan, yalnızca küçük bir miktara ihtiyacınız olacak, örneğin 0.01 ETH muhtemelen fazlasıyla yeterli olacaktır. -Küratörlüğünü yaptığınız bir subgraph Katman2'ye transfer edilmişse, Gezgin'de transfer edilmiş bir subgraph'a küratörlük yaptığınızı belirten bir mesaj göreceksiniz. +If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph. -Subgraph sayfasına bakarken, kürasyonu geri çekmeyi veya transfer etmeyi seçebilirsiniz. "Sinyali Arbitrum'a Transfer Et" seçeneğine tıkladığınızda transfer aracı açılacaktır. +When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. ![Transfer signal](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ Durum buysa, Arbitrum'da bir miktar ETH bulunan bir Katman2 cüzdanı bağlanman ## Katman1'deki kürasyonunuzu çekme -GRT'nizi Katman2'ye göndermek istemiyorsanız veya manuel olarak köprülemeyi tercih ediyorsanız, Katman1'de kürasyonu gerçekleşmiş GRT'lerinizi çekebilirsiniz. Subgraph sayfasındaki afişte "Sinyali Çek" seçeneğini seçin ve işlemi onaylayın; GRT, Küratör adresinize gönderilecektir. +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. 
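To make the proportional-claim rule from the curation section above concrete, here is a small worked sketch; the share and GRT figures are invented purely for illustration.

```typescript
// Minimal sketch: a Curator's claim on the GRT held by the GNS contract scales linearly with their shares.
// All numbers below are made up for illustration.
function claimableGrt(curatorShares: number, totalShares: number, grtHeldForSubgraph: number): number {
  return (curatorShares / totalShares) * grtHeldForSubgraph;
}

// Example: holding 250 of 10,000 shares on a Subgraph whose burned signal produced 40,000 GRT
// yields a claim of 1,000 GRT, whether it is withdrawn now or much later.
console.log(claimableGrt(250, 10_000, 40_000)); // 1000
```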
From 30d5819e032420198020c941573777cbea10e540 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:13:59 -0500 Subject: [PATCH 0331/1789] New translations l2-transfer-tools-guide.mdx (Ukrainian) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/uk/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/uk/archived/arbitrum/l2-transfer-tools-guide.mdx index 549618bfd7c3..4a34da9bad0e 100644 --- a/website/src/pages/uk/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/uk/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ The Graph has made it easy to move to L2 on Arbitrum One. For each protocol part Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## How to transfer your subgraph to Arbitrum (L2) +## How to transfer your Subgraph to Arbitrum (L2) -## Benefits of transferring your subgraphs +## Benefits of transferring your Subgraphs The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. -When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. -## Understanding what happens with signal, your L1 subgraph and query URLs +## Understanding what happens with signal, your L1 Subgraph and query URLs -Transferring a subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the subgraph to L2. The "transfer" will deprecate the subgraph on mainnet and send the information to re-create the subgraph on L2 using the bridge. 
It will also include the subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -When you choose to transfer the subgraph, this will convert all of the subgraph's curation signal to GRT. This is equivalent to "deprecating" the subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the subgraph, where they will be used to mint signal on your behalf. +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. If a subgraph owner does not transfer their subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -As soon as the subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the subgraph. However, there will be Indexers that will 1) keep serving transferred subgraphs for 24 hours, and 2) immediately start indexing the subgraph on L2. Since these Indexers already have the subgraph indexed, there should be no need to wait for the subgraph to sync, and it will be possible to query the L2 subgraph almost immediately. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. -Queries to the L2 subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. 
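Since the L1 gateway only forwards queries for a limited time, clients should switch endpoints promptly. A minimal sketch of that switch follows; the L1 URL shape is an assumption based on the gateway pattern, and both IDs are placeholders.

```typescript
// Minimal sketch: move query traffic from the old L1 gateway to the new L2 gateway.
// Both URLs contain placeholders, and the L1 URL shape is an assumption for illustration.
const L1_URL = "https://gateway.thegraph.com/api/[api-key]/subgraphs/id/[l1-subgraph-id]";
const L2_URL = "https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]";

// Keep the L1 endpoint only as a short-lived fallback while the grace period lasts.
const useL1Fallback = false;
const SUBGRAPH_URL = useL1Fallback ? L1_URL : L2_URL;

export async function runQuery(query: string): Promise<unknown> {
  const res = await fetch(SUBGRAPH_URL, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query }),
  });
  return res.json(); // { data, errors } in the usual GraphQL response shape
}
```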
## Choosing your L2 wallet -When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -When transferring the subgraph to Arbitrum, you can choose a different wallet that will own this subgraph NFT on L2. +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same owner address as in L1. -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph. +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the subgraph will be lost and cannot be recovered.** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## Preparing for the transfer: bridging some ETH -Transferring the subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. 
For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself.

You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Since gas fees on Arbitrum are lower, you should only need a small amount. It is recommended that you start at a low threshold (e.g. 0.01 ETH) for your transaction to be approved.

-## Finding the subgraph Transfer Tool
+## Finding the Subgraph Transfer Tool

-You can find the L2 Transfer Tool when you're looking at your subgraph's page on Subgraph Studio:
+You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio:

![transfer tool](/img/L2-transfer-tool1.png)

-It is also available on Explorer if you're connected with the wallet that owns a subgraph and on that subgraph's page on Explorer:
+It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer:

![Transferring to L2](/img/transferToL2.png)

@@ -60,19 +60,19 @@ Clicking on the Transfer to L2 button will open the transfer tool where you can

 ## Step 1: Starting the transfer

-Before starting the transfer, you must decide which address will own the subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above).
+Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommended to have some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above).

-Also please note transferring the subgraph requires having a nonzero amount of signal on the subgraph with the same account that owns the subgraph; if you haven't signaled on the subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice).
+Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice).

-After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 subgraph (see "Understanding what happens with signal, your L1 subgraph and query URLs" above for more details on what goes on behind the scenes).
+After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). 
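Because an incorrect receiving address cannot be recovered from, it can be worth validating the value pasted into the "Receiving wallet address" field before confirming. The sketch below only checks the basic hex format; a full EIP-55 checksum validation (for example via a wallet library) would be stricter.

```typescript
// Minimal sketch: basic format check for the receiving L2 wallet address before submitting the transfer.
// This only validates "0x" plus 40 hex characters; it does not verify the EIP-55 checksum.
function looksLikeAddress(addr: string): boolean {
  return /^0x[0-9a-fA-F]{40}$/.test(addr);
}

const receivingWallet = "0x0000000000000000000000000000000000000001"; // placeholder
if (!looksLikeAddress(receivingWallet)) {
  throw new Error("Receiving wallet address is malformed; double-check it before transferring");
}
```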
-If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Start the transfer to L2](/img/startTransferL2.png) -## Step 2: Waiting for the subgraph to get to L2 +## Step 2: Waiting for the Subgraph to get to L2 -After you start the transfer, the message that sends your L1 subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. @@ -80,7 +80,7 @@ Once this wait time is over, Arbitrum will attempt to auto-execute the transfer ## Step 3: Confirming the transfer -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your subgraph to L2 will be pending and require a retry within 7 days. +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. @@ -88,33 +88,33 @@ If this is the case, you will need to connect using an L2 wallet that has some E ## Step 4: Finishing the transfer on L2 -At this point, your subgraph and GRT have been received on Arbitrum, but the subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." 
-![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -This will publish the subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## Step 5: Updating the query URL -Your subgraph has been successfully transferred to Arbitrum! To query the subgraph, the new URL will be : +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be : `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Note that the subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the subgraph has been synced on L2. +Note that the Subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## How to transfer your curation to Arbitrum (L2) -## Understanding what happens to curation on subgraph transfers to L2 +## Understanding what happens to curation on Subgraph transfers to L2 -When the owner of a subgraph transfers a subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph. +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). 
Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph.

-A fraction of these GRT corresponding to the subgraph owner is sent to L2 together with the subgraph.
+A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph.

-At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it.
+At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be held indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it.

 ## Choosing your L2 wallet

@@ -130,9 +130,9 @@ If you're using a smart contract wallet, like a multisig (e.g. a Safe), then cho

 Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough.

-If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph.
+If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph.

-When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool.
+When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool.

![Transfer signal](/img/transferSignalL2TransferTools.png)

@@ -162,4 +162,4 @@ If this is the case, you will need to connect using an L2 wallet that has some E

 ## Withdrawing your curation on L1

-If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address.
+If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. 
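For the multisig caveat repeated above, one way to see whether a contract wallet actually exists on L2 is to ask an Arbitrum RPC node for the code at that address; an empty result means nothing is deployed there. A minimal sketch, with the endpoint and address as illustrative assumptions:

```typescript
// Minimal sketch: check whether an owner address has contract code on Arbitrum.
// A mainnet-only multisig will return "0x" (no bytecode) on L2 and should not be used as the L2 owner.
const ARBITRUM_RPC = "https://arb1.arbitrum.io/rpc"; // assumed public endpoint; any Arbitrum One RPC works
const OWNER_CANDIDATE = "0x0000000000000000000000000000000000000001"; // placeholder: your mainnet multisig

async function isContractOnArbitrum(address: string): Promise<boolean> {
  const res = await fetch(ARBITRUM_RPC, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ jsonrpc: "2.0", id: 1, method: "eth_getCode", params: [address, "latest"] }),
  });
  const { result } = await res.json();
  return result !== "0x"; // "0x" means no code is deployed at this address on L2
}

isContractOnArbitrum(OWNER_CANDIDATE).then((deployed) =>
  console.log(deployed ? "Contract exists on Arbitrum" : "No code on Arbitrum; use an EOA or deploy a new multisig"),
);
```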
From 057bc691664751a73f685f2296568a361897b26b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:00 -0500 Subject: [PATCH 0332/1789] New translations l2-transfer-tools-guide.mdx (Chinese Simplified) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/zh/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/zh/archived/arbitrum/l2-transfer-tools-guide.mdx index da4756a834dd..9a951655eed2 100644 --- a/website/src/pages/zh/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/zh/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ The Graph has made it easy to move to L2 on Arbitrum One. For each protocol part Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## 如何将你的子图转移到 Arbitrum (L2) +## How to transfer your Subgraph to Arbitrum (L2) -## 将子图转移到 Arbitrum 的好处 +## Benefits of transferring your Subgraphs 过去一年里,Graph社区和核心开发人员一直在为迁移到 Arbitrum [做准备](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) 。Arbitrum 是一种二层网络或“L2”区块链,继承了以太坊的安全性,但提供了大幅降低的燃气费用。 -当您将子图发布或升级到Graph网络时,您将与协议上的智能合约进行交互,这需要使用以太币(ETH)支付燃气费用。通过将您的子图迁移到Arbitrum,将来对您的子图进行的任何更新将需要更低的燃气费用。较低的费用以及L2网络上平滑的曲线,使其他策展人更容易在您的子图上进行策展,从而增加了在您的子图上的索引人的奖励。这种较低成本的环境还使得索引人更便宜地对您的子图进行索引和服务。在接下来的几个月里,Arbitrum上的索引奖励将增加,而以太坊主网上的索引奖励将减少,因此越来越多的索引器将会将他们的质押迁移到L2网络并在该网络上设置运营。 +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. -## 理解信号、你的 L1 子图和查询 URL 的变化 +## Understanding what happens with signal, your L1 Subgraph and query URLs -将子图转移到 Arbitrum 使用了 Arbitrum GRT 跨链桥,该跨链桥又使用了本机的 Arbitrum 跨链桥,将子图发送到 L2。这个“转账”操作会废弃主网上的子图,并使用跨链桥将重建子图所需的信息发送到 L2。它还包括子图所有者的信号 GRT,跨链桥需要接受转账时,这些信号 GRT 必须大于零。 +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -当你选择转移子图时,这将把所有子图的策展信号转换为 GRT。这相当于在主网上“废弃”子图。与你的策展相对应的 GRT 将与子图一起发送到 L2,其中它们将被用于代表你铸造信号。 +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. 
-其他策展人可以选择是否提取他们所占份额的 GRT,或者将其转移到 L2 上的同一子图上,以铸造新的策展信号。如果一个子图所有者不将他们的子图转移到 L2 并通过合约调用手动废弃它,那么策展人将收到通知并可以提取他们的策展。 +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -一旦子图转移完成,由于所有策展都转换为 GRT,索引器将不再因索引子图而获得奖励。但是,有些索引器会保持对转移的子图进行 24 小时的服务,并立即开始在 L2 上进行子图索引。由于这些索引人已经对子图进行了索引,所以无需等待子图同步,几乎可以立即查询 L2 子图。 +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. -对 L2 子图的查询需要使用不同的 URL(on `arbitrum-gateway.thegraph.com`),但 L1 URL 将继续工作至少 48 小时。之后,L1 网关将把查询转发到 L2 网关(一段时间内),但这会增加延迟,因此建议尽快将所有查询切换到新的 URL。 +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## 选择你的 L2 钱包 -当你在主网上发布子图时,你使用一个连接的钱包创建了子图,这个钱包拥有代表这个子图的 NFT,并允许你发布升级。 +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -当将子图转移到 Arbitrum 时,你可以选择一个不同的钱包在 L2 上持有这个子图 NFT 。 +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. 如果你使用的是像 MetaMask 这样的“常规”钱包(外部拥有账户或 EOA,即不是智能合约钱包),那么这是可选的,建议保持与 L1 中相同的所有者地址。 -如果你使用的是智能合约钱包,比如多签钱包(例如 Safe),那么选择不同的 L2 钱包地址是必需的,因为这个账户很可能只存在于主网上,你将无法使用这个钱包在 Arbitrum 上进行交易。如果你想继续使用智能合约钱包或多签钱包,那么可以在 Arbitrum 上创建一个新的钱包,并使用其地址作为你的子图的 L2 所有者。 +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**使用你能控制并且能在 Arbitrum 上进行交易的钱包地址非常重要,否则子图将丢失且无法恢复。** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## 为转移做准备:转移一些 ETH -转移子图涉及通过跨链桥发送一个交易,然后在 Arbitrum 上执行另一个交易。第一个交易使用主网上的 ETH,并包含一些 ETH 用于接收 L2 上的消息时支付燃气费用。然而,如果这个燃气费用不足,你将不得不重试交易,并直接在 L2 上支付燃气费用(这是下面的“第 3 步:确认转移”)。这一步必须在开始转移后的 7 天内执行。此外,第二个交易(“第 4 步:在 L2 上完成转移”)将直接在 Arbitrum 上执行。因此,你需要在 Arbitrum 钱包中拥有一些 ETH。如果你使用的是多签或智能合约账户,则 ETH 必须在你用于执行交易的常规(EOA)钱包中,而不是多签钱包本身。 +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. 
However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. 你可以在一些交易所购买 ETH,并直接将其提取到 Arbitrum,或者你可以使用 Arbitrum 跨链桥将 ETH 从主网钱包发送到 L2:[bridge.arbitrum.io](http://bridge.arbitrum.io)。由于 Arbitrum 上的燃气费用较低,你只需要一小笔资金即可。建议你设置一个较低的阈值(例如 0.01 ETH),以便你的交易得到批准。 -## 查找子图转移工具 +## Finding the Subgraph Transfer Tool -在 Subgraph Studio 查看你的子图页面时,你可以找到 L2 转移工具: +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![transfer tool](/img/L2-transfer-tool1.png) -如果你使用拥有子图的钱包连接到浏览器,你还可以在浏览器上的子图页面上找到它: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Transferring to L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ Some frequent questions about these tools are answered in the [L2 Transfer Tools ## 第 1 步:开始转移 -在开始转移之前,你必须决定哪个地址将在 L2 上拥有这个子图(参见上面的“选择你的 L2 钱包”),并且强烈建议提前转移一些 ETH到 Arbitrum (参见上面的“为转移做准备:转移一些 ETH”) +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). -另外,请注意转移子图需要在拥有与子图相同账户的非零信号 GRT 的情况下进行;如果你没有对子图发出信号,你将需要添加一点策展(添加少量,如 1 GRT 就足够)。 +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -在打开转移工具后,你将能够在“接收钱包地址”字段中输入 L2 钱包地址-请确保你在这里输入的地址是正确的。点击 "Transfer Subgraph" 将提示你在钱包上执行交易(注意,其中包含一定数量的 ETH,用于支付 L2 燃气费用);这将启动转移并废弃你的 L1 子图(关于背后发生的详细信息,请参见上面的“理解信号、你的 L1 子图和查询 URL 的变化”)。 +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -如果你执行了此步骤,确保在 7 天内完成第 3 步,否则子图和你的信号 GRT 将会丢失。这是由于 L1-L2 消息在 Arbitrum 上的工作方式:通过跨链桥发送的消息是“可重试的票据”,必须在 7 天内执行。如果 Arbitrum 上的燃气价格飙升,初始执行可能需要重试。 +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. 
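Because the retryable ticket expires, it can help to note the confirmation deadline as soon as step 1 is executed. A tiny sketch of that bookkeeping, with a placeholder start time:

```typescript
// Minimal sketch: track the 7-day window for confirming the transfer on L2.
// The start timestamp is a placeholder for when step 1 was executed.
const SEVEN_DAYS_MS = 7 * 24 * 60 * 60 * 1000;
const transferStartedAt = new Date("2025-02-25T17:00:00Z"); // placeholder
const deadline = new Date(transferStartedAt.getTime() + SEVEN_DAYS_MS);

const msLeft = deadline.getTime() - Date.now();
console.log(`Confirm on L2 before ${deadline.toISOString()}`);
console.log(msLeft > 0 ? `${(msLeft / 3_600_000).toFixed(1)} hours remaining` : "Deadline passed; the ticket may have expired");
```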
![Start the transfer to L2](/img/startTransferL2.png) -## 第 2 步:等待子图到达 L2 +## Step 2: Waiting for the Subgraph to get to L2 -在开始转移后,发送你的 L1 子图到 L2 的消息必须通过 Arbitrum 跨链桥传播。这大约需要 20 分钟(跨链桥会等待包含交易的主网区块在潜在的链重组方面是“安全”的)。 +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). 等待时间结束后,Arbitrum 将尝试自动在 L2 合约上执行转移。 @@ -80,7 +80,7 @@ Some frequent questions about these tools are answered in the [L2 Transfer Tools ## 第 3 步:确认转移 -在大多数情况下,这一步将自动执行,因为步骤 1 中包含的 L2 燃气应该足以执行在 Arbitrum 合约上接收子图的交易。但是,在某些情况下,Arbitrum 上的燃气价格激增可能导致此自动执行失败。在这种情况下,发送你的子图到 L2 的“票据”将处于挂起状态,并在 7 天内需要重试。 +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. 如果是这种情况,你将需要使用在 Arbitrum 上有一些 ETH 的 L2 钱包进行连接,将你的钱包网络切换到 Arbitrum,并点击“Confirm Transfer”以重试交易。 @@ -88,33 +88,33 @@ Some frequent questions about these tools are answered in the [L2 Transfer Tools ## 第 4 步:在 L2 上完成转移 -此时,你的子图和 GRT 已在 Arbitrum 上接收,但子图尚未发布。你需要使用你选择的 L2 钱包连接,将钱包网络切换到 Arbitrum,并点击“发布子图”。 +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." -![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -这将发布子图,使在 Arbitrum 上运行的索引人可以开始提供服务。它还将使用从 L1 转移过来的 GRT 铸造策展信号。 +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## 第 5 步:更新查询 URL -你的子图已成功转移到 Arbitrum!要查询子图,新的 URL 将为: +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be : `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -请注意,Arbitrum 上的子图 ID 将与你在主网上的 ID 不同,但你始终可以在 Explorer 或 Studio 上找到它。如上所述(参见“理解信号、你的 L1 子图和查询 URL 的变化”),旧的 L1 URL 将在短期内支持,但一旦子图在 L2 上同步,应尽快切换查询到新的地址。 +Note that the Subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## 如何将你的策展转移到 Arbitrum(L2) -## 理解子图转移到 L2 时的策展处理 +## Understanding what happens to curation on Subgraph transfers to L2 -当子图所有者将子图转移到 Arbitrum 时,所有子图的策展信号都会同时转换为 GRT。这适用于“自动迁移”的信号,即不特定于子图版本或部署的信号,而是遵循子图的最新版本。 +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. 
signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -这种信号到 GRT 的转换与子图所有者在 L1 上废弃子图时会发生的情况相同。当子图被废弃或转移时,所有策展信号都将同时被“销毁”(使用策展绑定曲线),并且由 GNS 智能合约(负责子图升级和自动迁移信号的合约)持有产生的 GRT。因此,每个在子图上的策展人都对那些 GRT 有一个按份额比例的提取权。 +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph. -其中一部分与子图所有者对应的 GRT 会与子图一起发送到 L2。 +A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph. -此时,策展的GRT将不再累积任何查询费用,因此,策展人可以选择撤回其GRT或将其转移到L2上的同一子图中,这样可以用于铸造新的策展信号。没有必要匆忙进行此操作,因为GRT可以无限期持有,每个人的份额比例无关紧要。 +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. ## 选择你的 L2 钱包 @@ -130,9 +130,9 @@ Some frequent questions about these tools are answered in the [L2 Transfer Tools 在开始转移之前,您必须决定哪个地址将在L2上拥有策展信号(参见上文的“选择您的L2钱包”),并建议您当需要在L2上重试消息执行时,提前转移一些用于手续费的 ETH到Arbitrum 上。您可以在某些交易所购买 ETH,并直接提款到 Arbitrum,或者您可以使用 Arbitrum 跨链桥将 ETH 从主网钱包发送到 L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - 由于 Arbitrum 上的燃料费用非常低,您可能只需要一小笔金额,例如 0.01 ETH 应该足够了。 -如果您策展的子图已经转移到L2,您将在浏览器上看到一条消息,告诉您正在为转移的子图进行策展。 +If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph. -在查看子图页面时,您可以选择撤回或转移策展。点击“将信号转移到 Arbitrum”将打开转移工具。 +When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. ![Transfer signal](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ Some frequent questions about these tools are answered in the [L2 Transfer Tools ## 从L1上撤回您的策展 -如果您不希望将您的GRT发送到L2,或者您更愿意手动桥接GRT,您可以在L1上撤回您的策展GRT。在子图页面的横幅上,选择“撤回信号”并确认交易;GRT将被发送到您的策展者地址。 +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. 
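For the "Update the query URL" step covered in the patch above: a minimal sketch of pointing a client at the new L2 gateway endpoint, assuming a plain fetch-based GraphQL request with hypothetical `API_KEY` and `L2_SUBGRAPH_ID` placeholders (not an official client library):

```typescript
// Minimal sketch: querying a Subgraph after its transfer to Arbitrum (L2).
// API_KEY and L2_SUBGRAPH_ID are illustrative placeholders only.
const API_KEY = "<api-key>";
const L2_SUBGRAPH_ID = "<l2-subgraph-id>";

// New L2 gateway endpoint, as given in the "Update the query URL" step.
const url = `https://arbitrum-gateway.thegraph.com/api/${API_KEY}/subgraphs/id/${L2_SUBGRAPH_ID}`;

async function querySubgraph(query: string): Promise<unknown> {
  const res = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query }),
  });
  if (!res.ok) {
    throw new Error(`Query failed with status ${res.status}`);
  }
  return res.json();
}

// Example usage with a generic metadata query supported by subgraph APIs.
querySubgraph(`{ _meta { block { number } } }`).then(console.log);
```

Since the old L1 gateway URL keeps working only for a short while after the transfer, keeping the endpoint behind a single constant such as `url` above makes the switch a one-line change.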
From f182dbabf7162d531d76304b3d9e73ba40d29b20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:01 -0500 Subject: [PATCH 0333/1789] New translations l2-transfer-tools-guide.mdx (Urdu (Pakistan)) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/ur/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/ur/archived/arbitrum/l2-transfer-tools-guide.mdx index 2099dcb22749..4684fb754f05 100644 --- a/website/src/pages/ur/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/ur/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ title: L2 ٹرانسفر ٹولز گائڈ Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## اپنے سب گراف کو Arbitrum (L2) میں کیسے منتقل کریں +## How to transfer your Subgraph to Arbitrum (L2) -## اپنے سب گرافس منتقل کرنے کے فوائد +## Benefits of transferring your Subgraphs گراف کی کمیونٹی اور بنیادی ڈویلپرز پچھلے سال سے Arbitrum پر جانے کے [تیار کر رہے ہیں](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305)۔ Arbitrum، ایک لیئر 2 یا "L2" بلاکچین، ایتھیریم سے سیکورٹی وراثت میں ملتی ہے لیکن گیس کی فیس بہت کم فراہم کرتی ہے. -جب آپ گراف نیٹ ورک پر اپنا سب گراف شائع یا اپ گریڈ کرتے ہیں، آپ پروٹوکول پر سمارٹ کنٹریکٹ کے ساتھ تعامل کر رہے ہوتے ہیں اور اس کے لیے ایتھیریم کا استعمال کرتے ہوئے گیس کی ادائیگی کی ضرورت ہوتی ہے۔ اپنا سب گراف Arbitrum پر منتقل کر کے، آپ کے سب گراف کی آئندہ کسی بھی اپ ڈیٹ کے لیے گیس کی بہت کم فیس درکار ہو گی۔ کم فیس، اور حقیقت یہ ہے کہ L2 پر کیوریشن بانڈنگ منحنی خطوط فلیٹ ہیں، کیوریٹرز کے لیے آپ کے سب گراف پر کیوریٹ کرنا آسان بناتے ہیں، جس سے آپ کے سب گراف پر انڈیکسرز کے لیے انعامات بڑھ جاتے ہیں۔ یہ کم لاگت والا ماحول بھی انڈیکسرز کے لیے آپ کے سب گراف کو انڈیکس کرنا اور پیش کرنا سستا بناتا ہے۔ Arbitrum پر انڈیکسنگ کے انعامات بڑھیں گے اور آنے والے مہینوں میں ایتھیریم مین نیٹ پر کم ہوں گے، اس لیے زیادہ سے زیادہ انڈیکسرز اپنے حصص کو منتقل کر رہے ہوں گے اور L2 پر اپنی کارروائیاں ترتیب دیں گے. +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. 
-## یہ سمجھنا کہ سگنل کے ساتھ کیا ہوتا ہے، آپ کا L1 سب گراف اور کیوری URLs +## Understanding what happens with signal, your L1 Subgraph and query URLs -سب گراف کو Arbitrum پر منتقل کرنا Arbitrum GRT بریج کا استعمال کرتا ہے، جو بدلے میں مقامی Arbitrum بریج استعمال کرتا ہے، سب گراف کو L2 پر بھیجنے کے لیے۔ "منتقلی" مین نیٹ پر سب گراف کو فرسودہ کر دے گی اور بریج کا استعمال کرتے ہوئے L2 پر سب گراف کو دوبارہ بنانے کے لیے معلومات بھیجے گی۔ اس میں سب گراف کے مالک کا سگنل شدہ GRT بھی شامل ہوگا، جو بریج کے لیے منتقلی کو قبول کرنے کے لیے صفر سے زیادہ ہونا چاہیے. +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -جب آپ سب گراف کو منتقل کرنے کا انتخاب کرتے ہیں، یہ تمام سب گراف کے کیوریشن سگنلز کو GRT میں تبدیل کر دے گا۔ یہ مین نیٹ پر سب گراف کو "فرسودہ" کرنے کے مترادف ہے۔ آپ کے کیوریشن کے مطابق GRT سب گراف کے ساتھ L2 کو بھیجا جائے گا، جہاں ان کا استعمال آپ کی جانب سے سگنل دینے کے لیے کیا جائے گا. +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -دوسرے کیوریٹرز یہ انتخاب کر سکتے ہیں کہ آیا اپنے GRT کا حصہ لینا ہے، یا اسی سب گراف پر اسے L2 پر منٹ سگنل پر منتقل کرنا ہے۔ اگر ایک سب گراف کا مالک اپنا سب گراف L2 میں منتقل نہیں کرتا ہے اور اسے کنٹریکٹ کال کے ذریعے دستی طور پر فرسودہ کرتا ہے، تو کیوریٹرز کو مطلع کیا جائے گا اور وہ اپنا کیوریشن واپس لے سکیں گے. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -جیسے ہی سب گراف منتقل ہو جائے گا، چونکہ تمام کیوریشن GRT میں تبدیل ہو چکی ہے، انڈیکسر کو سب گراف کو انڈیکس کرنے کے لیے مزید انعامات نہیں ملیں گے۔ البتہ، ایسے انڈیکسرز ہوں گے جو 1) منتقل ہونے والے سب گرافس کو 24 گھنٹوں تک پیش کرتے رہیں گے، اور 2) فوری طور پر L2 پر سب گراف انڈیکسنگ شروع کر دیں گے۔ چونکہ ان کے انڈیکسرز کے پاس پہلے سے ہی انڈیکسڈ سب گراف موجود ہے، اس لیے سب گراف کے مطابقت پذیر ہونے کا انتظار کرنے کی ضرورت نہیں ہے، اور L2 سب گراف سے تقریبآٓ فورآٓ کیوری کرنا ممکن ہو گا. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. 
-L2 سب گراف سے متعلق کیوریز ایک مختلف لنک پر کرنے کی ضرورت ہوگی (`arbitrum-gateway.thegraph.com` پر)، لیکن L1 لنک کم از کم 48 گھنٹے تک کام کرتا رہے گا۔ اس کے بعد، L1 گیٹ وے کیوریز کو L2 گیٹ وے (کچھ وقت کے لیے) پر بھیجے گا، لیکن اس سے تاخیر میں اضافہ ہو جائے گا، اس لیے یہ تجویز کیا جاتا ہے کہ آپ اپنے تمام کیوریز کو جلد از جلد نئے لنک میں تبدیل کر دیں۔ +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## اپنا L2 والیٹ منتخب کرنا -کب آپ اپنا سب گراف مین نیٹ پر شائع کرتے ہیں، آپ نے اپنا سب گراف بنانے کے لیے کنیکٹڈ والیٹ کا استعمال کیا، اور یہ والیٹ NFT کا مالک ہے جو اس سب گراف کی نمائندگی کرتا ہے اور آپ کو اپ ڈیٹس شائع کرنے کی اجازت دیتا ہے۔ +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -جب سب گراف کو Arbitrum پر منتقل کر رہے ہوں، آپ مختلف والیٹ استعمال کر سکتے ہیں جو L2 پر اس سب گراف NFT کا مالک ہو گا۔ +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. اگر آپ میٹا ماسک کی طرح ایک عام والیٹ استعمال کر رہے ہیں (ایک بیرونی ملکیتی اکاؤنٹ یا EOA، یعنی ایک والیٹ جو سمارٹ کنٹریکٹ نہیں ہے)، تو یہ اختیاری ہے اور یہ نصیحت کی جاتی ہے کہ مالک کا وہی ایڈریس رکھا جائے جو L1 میں ہے۔ -اگر آپ سمارٹ کنٹریکٹ والیٹ کا استعمال کر رہے ہیں، جیسے کہ ملٹی سگ (مثال کے طور پر ایک تجوری)، پھر ایک مختلف والیٹ ایڈریس کا استعمال کرنا ضروری ہے، کیونکہ یہ زیادہ امکان ہیں کہ اکاؤنٹ صرف مین نیٹ پر ہو گا اور آپ Arbitrum پر اس والیٹ کا استعمال کرتے ہوئے ٹرانزیکشنز نہیں کر پائیں گے۔ اگر آپ سمارٹ کنٹریکٹ والیٹ یا ملٹی سگ کا استعمال جاری رکھنا چاہتے ہیں، Arbitrum پر نیا والیٹ بنائیں اور اس کا ایڈریس اپنے سب گراف کا L2 مالک ہونے کی حیثیت سے استعمال کریں۔ +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**یہ بہت ضروری ہے کہ آپ جو والیٹ ایڈریس استعمال کریں اس کا کنٹرول آپ کے پاس ہو، اور جو Arbitrum پر ٹرانزیکشنز کر سکے۔ ورنہ، سب گراف کھو جائے گا اور بازیافت نہیں ہو پائے گا۔** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. 
Otherwise, the Subgraph will be lost and cannot be recovered.** ## منتقلی کی تیاری: کچھ ایتھیریم بریج کرنا -سب گراف کی منتقلی میں بریج کے ذریعے ٹرانزیکشن بھیجنا شامل ہے، اور پھر Arbitrum پر ایک اور ٹرانزیکشن کو انجام دینا۔ پہلی ٹرانزیکشن مین نیٹ پر ایتھیریم کا استعمال کرتی ہے، اور L2 پر پیغام موصول ہونے پر گیس کی ادائیگی کے لیے کچھ ایتھیریم شامل کرتا ہے۔ تاہم، اگر یہ گیس ناکافی ہے، تو آپ کو ٹرانزیکشن کی دوبارہ کوشش کرنی ہوگی اور براہ راست L2 پر گیس کی ادائیگی کرنی ہوگی (یہ ذیل میں "مرحلہ 3: منتقلی کی تصدیق" ہے)۔ یہ مرحلہ **منتقلی شروع کرنے کے 7 دنوں کے اندر انجام دیا جانا چاہیے**۔ مزید یہ کہ، دوسری ٹرانزیکشن ("مرحلہ 4: L2 پر منتقلی کو ختم کرنا") براہ راست Arbitrum پر کیا جائے گا۔ ان وجوہات کی بناء پر، آپ کو Arbitrum والیٹ پر کچھ ایتھیریم کی ضرورت ہوگی۔ اگر آپ ملٹی سگ یا سمارٹ کنٹریکٹ اکاؤنٹ استعمال کر رہے ہیں، تو ایتھیریم کو باقاعدہ (EOA) والیٹ میں ہونا چاہیے جسے آپ ٹرانزیکشن کو انجام دینے کے لیے استعمال کر رہے ہیں، نہ کہ ملٹی سگ والیٹ پر۔ +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. آپ کچھ ایکسچینجیز سے ایتھیریم خرید سکتے ہیں اور سیدھا اسے Arbitrum میں مگوا سکتے ہیں، یا آپ ایتھیریم کو مین نیٹ والیٹ سے L2 پر Arbitrum بریج کا استعمال کرتے ہوئے کر سکتے ہیں: [bridge.arbitrum.io](http://bridge.arbitrum.io)۔ چونکہ Arbitrum پر گیس فیس کم ہوتے ہے، آپ کو صرف چھوٹی سی مقدار کی ضرورت پڑے گی۔ یہ تجویز کیا جاتا ہے کہ آپ اپنی ٹرانزیکشن کی منظوری کے لیے کم حد (مثال کے طور پر 0.01 ایتھیریم) سے شروع کریں۔ -## سب گراف ٹرانسفر ٹول تلاش کرنا +## Finding the Subgraph Transfer Tool -آپ L2 ٹرانسفر ٹول تلاش کر سکتے ہیں جب آپ سب گراف سٹوڈیو پر اپنا سب گراف کا پیج دیکھ رہے ہوں گے۔ +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![ٹرانسفر ٹول](/img/L2-transfer-tool1.png) -یہ ایکسپلورر پر بھی دستیاب ہے اگر آپ اس والیٹ سے کنیکٹڈ ہیں جس کے پاس سب گراف ہے اور ایکسپلورر پر اس سب گراف کے پیج پر: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![L2 پر منتقل کرنا](/img/transferToL2.png) @@ -60,19 +60,19 @@ L2 پر منتقل کرنے کے بٹن پر کلک کرنے سے ٹرانسفر ## مرحلہ 1: منتقلی شروع کرنا -منتقلی شروع کرنے سے پہلے، آپ کو یہ فیصلہ کرنا ہو گا کہ L2 پر کون سا ایڈریس سب گراف کا مالک ہو گا (اوپر "اپنے L2 والیٹ کا انتخاب" دیکھیں)، اور یہ پرزور مشورہ دیا جاتا ہے کہ Arbitrum پر پہلے سے ہی گیس کے لیے کچھ ایتھیریم رکھیں (دیکھیں "منتقلی کی تیاری: کچھ ایتھیریم بریج کرنا" اوپر)۔ +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). 
-یہ بھی نوٹ کریں کہ سب گراف کی منتقلی کے لیے سب گراف پر اسی اکاؤنٹ کے ساتھ سگنل کی غیر صفر مقدار کی ضرورت ہوتی ہے جس کے پاس سب گراف ہے۔ اگر آپ نے سب گراف پر اشارہ نہیں کیا ہے تو آپ کو تھوڑا سا کیوریشن شامل کرنا پڑے گا (ایک چھوٹی سی رقم جیسے ایک GRT شامل کرنا کافی ہوگا)۔ +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -ٹرانسفر ٹول کھولنے کے بعد، آپ L2 والیٹ ایڈریس کو "ریسیونگ والیٹ ایڈریس" فیلڈ میں داخل کرنے کے قابل ہو جائیں گے- ** یقینی بنائیں کہ آپ نے یہاں درست ایڈریس لکھا ہے**۔ ٹرانسفر سب گراف پر کلک کرنے سے آپ کو اپنے والیٹ پر ٹرانزیکشن کرنے کا اشارہ ملے گا (نوٹ کریں کہ L2 گیس کی ادائیگی کے لیے کچھ ایتھیریم ویلیو شامل ہے)؛ یہ منتقلی کا آغاز کرے گا اور آپ کے L1 سب گراف کو فرسودہ کر دے گا (پردے کے پیچھے کیا ہو رہا ہے اس کے بارے میں مزید تفصیلات کے لیے اوپر دیکھیں "سگنل کے ساتھ کیا ہوتا ہے، آپ کا L1 سب گراف اور کیوری لنکس" دیکھیں)۔ +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -اگر آپ اس قدم پر عمل کرتے ہیں، تو **یقینی بنائیں کہ آپ 7 دنوں سے بھی کم وقت میں مرحلہ 3 مکمل کرنے تک آگے بڑھیں، ورنہ سب گراف اور آپ کا سگنل GRT ضائع ہو جائے گا۔** یہ اس وجہ سے ہے کہ L1-L2 پیغام رسانی Arbitrum پر کیسے کام کرتی ہے: پیغامات جو بریج کے ذریعے بھیجے گئے "دوبارہ کوشش کے قابل ٹکٹ" ہیں جن پر عمل درآمد 7 دنوں کے اندر ہونا ضروری ہے، اور اگر Arbitrum پر گیس کی قیمت میں اضافہ ہوتا ہے تو ابتدائی عمل درآمد کے لیے دوبارہ کوشش کی ضرورت پڑ سکتی ہے۔ +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Start the transfer to L2](/img/startTransferL2.png) -## مرحلہ 2: سب گراف کے L2 تک پہنچنے کا انتظار کرنا +## Step 2: Waiting for the Subgraph to get to L2 -منتقلی شروع کرنے بعد، وہ پیغام جو آپ کا L1 سب گراف L2 کو بھیجتا ہے اسے Arbitrum بریج کے ذریعے پھیلانا چاہیے۔ اس میں تقریبآٓ 20 منٹ لگتے ہیں (بریج مین نیٹ بلاک کا انتظار کرتا ہے جس میں ٹرانزیکشن کو ممکنہ چین کی بحالی سے "محفوظ" رکھا جاتا ہے)۔ +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). 
انتظار کا وقت ختم ہونے کے بعد، Arbitrum L2 کنٹریکٹس پر منتقلی کو خودکار طریقے سے انجام دینے کی کوشش کرے گا۔ @@ -80,7 +80,7 @@ L2 پر منتقل کرنے کے بٹن پر کلک کرنے سے ٹرانسفر ## مرحلہ 3: منتقلی کی تصدیق کرنا -زیادہ تر معاملات میں، یہ مرحلہ خود بخود عمل میں آجائے گا کیونکہ مرحلہ 1 میں شامل L2 گیس اس ٹرانزیکشن کو انجام دینے کے لیے کافی ہونی چاہیے جو Arbitrum کنٹریکٹس پر سب گراف وصول کرتی ہے۔ تاہم، بعض صورتوں میں، یہ ممکن ہے کہ Arbitrum پر گیس کی قیمتوں میں اضافہ اس خود کار طریقے سے عمل کو ناکام بنادے۔ اس صورت میں، "ٹکٹ" جو آپ کے سب گراف کو L2 پر بھیجتا ہے زیر التواء رہے گا اور اسے 7 دنوں کے اندر دوبارہ کوشش کرنے کی ضرورت ہوگی۔ +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. اس صورت میں، آپ کو L2 والیٹ کنیکٹ کرنے کی ضرورت پڑے گی جس میں Arbitrum میں تھوڑا ایتھیریم موجود ہو، اپنے والیٹ نیٹ ورک کو Arbitrum میں سویچ کریں، اور "کنفرم ٹرانسفر" کو ٹرانزیکشن دہرانے کے لیے دبائیں. @@ -88,33 +88,33 @@ L2 پر منتقل کرنے کے بٹن پر کلک کرنے سے ٹرانسفر ## مرحلہ 4: L2 پر منتقلی ختم کریں -اس موقع پر، آپ کا سب گراف اور GRT آپ کے Arbitrum میں موصول ہو چکے ہیں، لیکن سب گراف ابھی تک شائع نہیں ہوا۔ آپ کو L2 والیٹ کا استعمال کرتے ہوئے منسلک کرنے کی ضرورت ہو گی جسے آپ نے وصول کرنے والے والیٹ کے طور پر منتخب کیا ہے، اپنے والیٹ نیٹ ورک کو Arbitrum میں سویچ کریں ، اور "سب گراف شائع کریں" پر کلک کریں۔ +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." -![سب گراف شائع کریں](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![سب گراف کے شائع ہونے کا انتظار کریں](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -یہ سب گراف کو شائع کرے گا تا کہ انڈیکسرز جو Arbitrum پر کام کر رہے ہیں اسے پیش کرنا شروع کر سکیں۔ یہ GRT کا استعمال کرتے ہوئے کیوریشن سگنل بھی دے گا جو L1 سے منتقل کیا گیا ہے. +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## مرحلہ 5: کیوری لنک اپ ڈیٹ کریں -آپ کا سب گراف کامیابی کے ساتھ Arbitrum پر منتقل کر دیا گیا ہے! سب گراف کو کیوری کرنے کے لیے، نیا لنگ ہو گا: +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be : `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -نوٹ کریں کہ Arbitrum پر سب گراف ID آپ کے مین نیٹ پر موجود ایک سے مختلف ہوگی، لیکن آپ اسے ہمیشہ ایکسپلورر یا سٹوڈیو پر تلاش کر سکتے ہیں۔ جیسا کہ اوپر بتایا گیا ہے (دیکھیں "سگنل کے ساتھ کیا ہوتا ہے، آپ کے L1 سب گراف اور کیوری والے لنکس") پرانا L1 لنک تھوڑی دیر کے لیے سپورٹ کیا جائے گا، لیکن آپ کو اپنی کیوریز کو نئے ایڈریس پر تبدیل کر دینا چاہیے جیسے ہی سب گراف کی مطابقت پذیری L2 پر ہو جائے گی. +Note that the Subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. 
As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## اپنی کیوریشن کو کیسے Arbitrum (L2) پر منتقل کیا جائے -## یہ سمجھنا کہ L2 میں سب گراف کی منتقلی پر کیوریشن کا کیا ہوتا ہے +## Understanding what happens to curation on Subgraph transfers to L2 -جب سب گراف کا مالک سب گراف کو Arbitrum پر منتقل کرتا ہے، سب گراف کے تمام سگنلز اسی وقت GRT میں تبدیل ہو جاتے ہیں۔ یہ "آٹو مائیگریٹڈ" پر لاگو ہوتا ہے، یعنی وہ سگنل جو سب گراف ورزن یا تعیناتی کے لیے مخصوص نہیں ہے لیکن یہ سب گراف کے تازہ ترین ورزن کی پیروی کرتا ہے۔ +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -سگنل سے GRT میں یہ تبدیلی وہی ہے جیسا کہ اگر سب گراف کے مالک نے L1 میں سب گراف کو فرسودہ کیا تو کیا ہوگا۔ جب سب گراف کو فرسودہ یا منتقل کیا جاتا ہے، تو تمام کیوریشن سگنل بیک وقت "برن" ہو جاتے ہیں (کیوریشن بانڈنگ کریو کا استعمال کرتے ہوئے) اور نتیجے میں GRT سمارٹ کنٹریکٹ GNS کے پاس ہوتا ہے (یہ وہ کنٹریکٹ ہے جو سب گراف اپ گریڈ اور خودکار منتقلی سگنل کو ہینڈل کرتا ہے)۔ اس لیے اس سب گراف پر ہر کیوریٹر کا دعویٰ ہے کہ وہ سب گراف کے حصص کی مقدار کے متناسب GRT پر ہے۔ +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph. -سب گراف کے مالک کے مطابق ان GRT کا ایک حصہ سب گراف کے ساتھ L2 کو بھیجا جاتا ہے۔ +A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph. -اس مقام پر، کیویرٹڈ GRT مزید کیوری کی فیس جمع نہیں کرے گا، لہذا کیوریٹرز اپنا GRT واپس لینے یا اسے L2 پر اسی سب گراف میں منتقل کرنے کا انتخاب کر سکتے ہیں، جہاں اسے نئے کیویریشن سگنل کے لیے استعمال کیا جا سکتا یے۔ ایسا کرنے میں کوئی جلدی نہیں ہے کیونکہ GRT غیر معینہ مدت کے لیے مدد کی جا سکتی ہے اور ہر کسی کو اس کے حصص کے متناسب رقم ملتی ہے، چاہے وہ ایسا کرتے ہی کیوں نہ ہوں۔ +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. 
## اپنا L2 والیٹ منتخب کرنا @@ -130,9 +130,9 @@ L2 پر منتقل کرنے کے بٹن پر کلک کرنے سے ٹرانسفر منتقلی شروع کرنے سے پہلے، آپ کو یہ فیصلہ کرنا ہوگا کہ L2 پر کیوریشن کا کون سا ایڈریس ہوگا (اوپر "اپنے L2 والیٹ کا انتخاب" دیکھیں)، اور یہ تجویز کی جاتی ہے کہ اگر آپ کو L2 پر پیغام کے نفاذ کی دوبارہ کوشش کرنے کی ضرورت ہو تو Arbitrum پر پہلے سے ہی بریج شدہ گیس کے لیے کچھ ایتھیریم رکھیں۔ آپ کچھ ایکسچینجز پر ایتھیریم خرید سکتے ہیں اور اسے براہ راست Arbitrum میں واپس لے سکتے ہیں، یا آپ ایتھیریم کو مین نیٹ والیٹ سے L2 پر بھیجنے کے لیے Arbitrum بریج کا استعمال کر سکتے ہیں: [bridge.arbitrum.io](http://bridge.arbitrum.io) - چونکہ Arbitrum پر گیس کی فیس بہت کم ہیں، آپ کو صرف تھوڑی سی رقم کی ضرورت ہوگی، جیسے۔ 0.01 ایتھیریم شاید کافی سے زیادہ ہو گا۔ -اگر جو سب گراف آپ کیویرٹ کر رہے ہیں L2 پر منتقل ہو گیا ہے، آپ ایکسپلورر پر ایک میسج دیکھیں گے جو بتا رہا ہو گا کہ آپ منتقل ہوئے سب گراف پر کیوریٹ کر رہے ہیں۔ +If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph. -سب گراف پیج پر دیکھتے ہوئے، آپ کیوریشن واپس لینے یا منتقل کرنے کا انتخاب کر سکتے ہیں۔ "Arbitrum پر سگنل منتقل کریں" پر کلک کرنے سے ٹرانسفر ٹول کھل جائے گا۔ +When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. ![ٹرانسفر سگنل](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ L2 پر منتقل کرنے کے بٹن پر کلک کرنے سے ٹرانسفر ## L1 پر اپنی کیوریشن واپس لینا -اگر آپ اپنے GRT کو L2 پر نہیں بھیجنا پسند کرتے ہیں، یا آپ GRT کو دستی طور پر بریج کرنا چاہتے ہیں، تو آپ L1 پر اپنا کیوریٹ شدہ GRT واپس لے سکتے ہیں۔ سب گراف کے پیج پر بینر پر، "سگنل واپس لیں" کا انتخاب کریں اور ٹرانزیکشن کی تصدیق کریں۔ GRT آپ کے کیوریٹر کے ایڈریس پر بھیج دیا جائے گا۔ +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. From c61d4406d86517a672460ef246815cc8194652cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:02 -0500 Subject: [PATCH 0334/1789] New translations l2-transfer-tools-guide.mdx (Vietnamese) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/vi/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/vi/archived/arbitrum/l2-transfer-tools-guide.mdx index 78ec8c82a911..e0b5aa2214fa 100644 --- a/website/src/pages/vi/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/vi/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ The Graph has made it easy to move to L2 on Arbitrum One. For each protocol part Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## How to transfer your subgraph to Arbitrum (L2) +## How to transfer your Subgraph to Arbitrum (L2) -## Benefits of transferring your subgraphs +## Benefits of transferring your Subgraphs The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. 
Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. -When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. -## Understanding what happens with signal, your L1 subgraph and query URLs +## Understanding what happens with signal, your L1 Subgraph and query URLs -Transferring a subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the subgraph to L2. The "transfer" will deprecate the subgraph on mainnet and send the information to re-create the subgraph on L2 using the bridge. It will also include the subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -When you choose to transfer the subgraph, this will convert all of the subgraph's curation signal to GRT. This is equivalent to "deprecating" the subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the subgraph, where they will be used to mint signal on your behalf. +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. If a subgraph owner does not transfer their subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. 
+Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -As soon as the subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the subgraph. However, there will be Indexers that will 1) keep serving transferred subgraphs for 24 hours, and 2) immediately start indexing the subgraph on L2. Since these Indexers already have the subgraph indexed, there should be no need to wait for the subgraph to sync, and it will be possible to query the L2 subgraph almost immediately. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. -Queries to the L2 subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## Chọn ví L2 của bạn -When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -When transferring the subgraph to Arbitrum, you can choose a different wallet that will own this subgraph NFT on L2. +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. Nếu bạn đang sử dụng ví "thông thường" như MetaMask (Tài khoản thuộc sở hữu bên ngoài hoặc EOA, tức là ví không phải là hợp đồng thông minh), thì đây là tùy chọn và bạn nên giữ cùng địa chỉ chủ sở hữu như trong L1. -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph. +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. 
If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the subgraph will be lost and cannot be recovered.** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## Preparing for the transfer: bridging some ETH -Transferring the subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Since gas fees on Arbitrum are lower, you should only need a small amount. It is recommended that you start at a low threshold (0.e.g. 01 ETH) for your transaction to be approved. 
-## Finding the subgraph Transfer Tool +## Finding the Subgraph Transfer Tool -You can find the L2 Transfer Tool when you're looking at your subgraph's page on Subgraph Studio: +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![công cụ chuyển](/img/L2-transfer-tool1.png) -It is also available on Explorer if you're connected with the wallet that owns a subgraph and on that subgraph's page on Explorer: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Chuyển sang L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ Nhấp vào nút Chuyển sang L2 sẽ mở công cụ chuyển nơi bạn có t ## Step 1: Starting the transfer -Before starting the transfer, you must decide which address will own the subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). -Also please note transferring the subgraph requires having a nonzero amount of signal on the subgraph with the same account that owns the subgraph; if you haven't signaled on the subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 subgraph (see "Understanding what happens with signal, your L1 subgraph and query URLs" above for more details on what goes on behind the scenes). +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. 
+If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Start the transfer to L2](/img/startTransferL2.png) -## Step 2: Waiting for the subgraph to get to L2 +## Step 2: Waiting for the Subgraph to get to L2 -After you start the transfer, the message that sends your L1 subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. @@ -80,7 +80,7 @@ Once this wait time is over, Arbitrum will attempt to auto-execute the transfer ## Step 3: Confirming the transfer -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your subgraph to L2 will be pending and require a retry within 7 days. +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. @@ -88,33 +88,33 @@ If this is the case, you will need to connect using an L2 wallet that has some E ## Step 4: Finishing the transfer on L2 -At this point, your subgraph and GRT have been received on Arbitrum, but the subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." -![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -This will publish the subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. 
+This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## Bước 5: Cập nhật URL truy vấn -Your subgraph has been successfully transferred to Arbitrum! To query the subgraph, the new URL will be : +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be : `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Note that the subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the subgraph has been synced on L2. +Note that the Subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## How to transfer your curation to Arbitrum (L2) -## Understanding what happens to curation on subgraph transfers to L2 +## Understanding what happens to curation on Subgraph transfers to L2 -When the owner of a subgraph transfers a subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph. +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph. -A fraction of these GRT corresponding to the subgraph owner is sent to L2 together with the subgraph. +A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph. -At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. 
There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. ## Chọn ví L2 của bạn @@ -130,9 +130,9 @@ Nếu bạn đang sử dụng ví hợp đồng thông minh, chẳng hạn như Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough. -If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph. +If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph. -When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. +When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. ![Transfer signal](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ If this is the case, you will need to connect using an L2 wallet that has some E ## Withdrawing your curation on L1 -If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. From 4b1ba9658744790ce9359a604c9e457ddd8eca3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:03 -0500 Subject: [PATCH 0335/1789] New translations l2-transfer-tools-guide.mdx (Marathi) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/mr/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/mr/archived/arbitrum/l2-transfer-tools-guide.mdx index cb0215fe9cd0..32e1b7fc75f3 100644 --- a/website/src/pages/mr/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/mr/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ title: L2 Transfer Tools Guide Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. 
-## तुमचा सबग्राफ आर्बिट्रम (L2) वर कसा हस्तांतरित करायचा +## How to transfer your Subgraph to Arbitrum (L2) -## तुमचे सबग्राफ हस्तांतरित करण्याचे फायदे +## Benefits of transferring your Subgraphs मागील वर्षापासून, The Graph चे समुदाय आणि मुख्य डेव्हलपर [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305)करीत होते त्याच्या गोष्टीसाठी आर्बिट्रमवर जाण्याची. आर्बिट्रम, एक श्रेणी 2 किंवा "L2" ब्लॉकचेन, ईथेरियमकिडून सुरक्षा अनुभवतो परंतु काही लोअर गॅस फी प्रदान करतो. -जेव्हा तुम्ही आपल्या सबग्राफला The Graph Network वर प्रकाशित किंवा अपग्रेड करता तेव्हा, तुम्ही प्रोटोकॉलवरच्या स्मार्ट कॉन्ट्रॅक्ट्ससोबत संवाद साधता आहात आणि हे ईथ वापरून गॅससाठी पैसे देता येतात. आर्बिट्रमवर तुमच्या सबग्राफला हल्लीक अपडेट्सची आवश्यकता असल्यामुळे आपल्याला खूप कमी गॅस फी परतण्यात आलेली आहे. या कमी फीस, आणि लोअर करण्याची बंद पट आर्बिट्रमवर असल्याचे, तुमच्या सबग्राफवर इतर क्युरेटरसाठी सुविधा असताना तुमच्या सबग्राफवर कुणासही क्युरेशन करणे सोपे होते, आणि तुमच्या सबग्राफवर इंडेक्सरसाठी पुरस्कारांची वाढ होतील. या किमतीसवर्गीय वातावरणात इंडेक्सरसाठी सबग्राफला सूचीबद्ध करणे आणि सेव करणे सोपे होते. आर्बिट्रमवर इंडेक्सिंग पुरस्कारे आणि ईथेरियम मेननेटवर किमतीची वाढ होणारी आहेत, आणि यामुळे अगदी अधिक इंडेक्सरस त्याची स्थानिकता हस्तांतरित करत आहेत आणि त्यांचे ऑपरेशन्स L2 वर स्थापित करत आहेत.". +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. -## सिग्नल, तुमचा L1 सबग्राफ आणि क्वेरी URL सह काय होते हे समजून घेणे +## Understanding what happens with signal, your L1 Subgraph and query URLs -सबग्राफला आर्बिट्रमवर हस्तांतरित करण्यासाठी, आर्बिट्रम GRT सेतूक वापरला जातो, ज्याच्या परत आर्बिट्रमच्या मूळ सेतूकाचा वापर केला जातो, सबग्राफला L2 वर पाठवण्यासाठी. "हस्तांतरण" मुख्यनेटवर सबग्राफची वैल्यू कमी करणारा आहे आणि सेतूकाच्या ब्रिजच्या माध्यमातून लॉकल 2 वर सबग्राफ पुन्हा तयार करण्याची माहिती पाठवण्यात आली आहे. त्यामुळे हा "हस्तांतरण" मुख्यनेटवरील सबग्राफला अस्तित्वातून टाकेल आणि त्याची माहिती ब्रिजवार L2 वर पुन्हा तयार करण्यात आली आहे. हस्तांतरणात सबग्राफ मालकाची संकेतित GRT समाविष्ट केली आहे, ज्याची उपसंकेतित GRT मूळ सेतूकाच्या ब्रिजकडून हस्तांतरित करण्यासाठी जास्तीत जास्त शून्यापेक्षा असणे आवश्यक आहे. +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -जेव्हा तुम्ही सबग्राफला हस्तांतरित करण्याची निवड करता, हे सबग्राफचे सर्व क्युरेशन सिग्नल GRT मध्ये रूपांतरित होईल. ह्याचे मुख्यनेटवर "अप्रामाणिक" घेण्याच्या अर्थाने आहे. तुमच्या क्युरेशनसह संबंधित GRT सबग्राफसह पाठवली जाईल, त्यामुळे त्यांचा L2 वर पाठवला जाईल, त्यातून त्यांचा नमूद कुंडला तयार केला जाईल. 
+When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -इतर क्युरेटरस स्वत: त्यांच्या भागाचा GRT परत घेण्याची किंवा त्याच्या एकल सबग्राफवर त्यांच्या सिग्नल तयार करण्यासाठी हस्तांतरित करण्याची पर्वानगी देऊ शकतात. जर सबग्राफ मालक त्याच्या सबग्राफला L2 वर हस्तांतरित करत नसता आणि त्याच्या कॉन्ट्रॅक्ट कॉलद्वारे मौना करतो, तर क्युरेटरसला सूचना दिली जाईल आणि त्यांना आपल्याच्या क्युरेशनची परवानगी वापरून परत घेतली जाईल. +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -सबग्राफ हस्तांतरित केल्यानंतर, क्युरेशन सर्व GRT मध्ये रूपांतरित केल्यामुळे इंडेक्सरसला सबग्राफच्या इंडेक्सिंगसाठी पुरस्कार मिळवत नाही. परंतु, 24 तासांसाठी हस्तांतरित केलेल्या सबग्राफवर सेवा देणारे इंडेक्सर असतील आणि 2) L2 वर सबग्राफची इंडेक्सिंग प्रारंभ करतील. ह्या इंडेक्सरसांच्या पासून आधीपासूनच सबग्राफची इंडेक्सिंग आहे, म्हणून सबग्राफ सिंक होण्याची वाटचाल नसल्याची आवश्यकता नसून, आणि L2 सबग्राफची क्वेरी करण्यासाठी त्याच्यासाठी वाटचाल नसेल. +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. -L2 सबग्राफला क्वेरीसाठी वेगवेगळे URL वापरण्याची आवश्यकता आहे ('arbitrum-gateway.thegraph.com' वरील), परंतु L1 URL किमान 48 तासांसाठी काम करणार आहे. त्यानंतर, L1 गेटवे वेगवेगळ्या क्वेरीला L2 गेटवेला पुर्वानुमान देईल (काही कालावधीसाठी), परंतु त्यामुळे द्रुतिकरण वाढतो, म्हणजे तुमच्या क्वेरीस सर्व किंवा नवीन URL वर स्विच करणे शक्य आहे. +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## तुमचे L2 वॉलेट निवडत आहे -तुम्ही तुमच्या सबग्राफची मेननेटवर प्रकाशित केल्यास, तुम्ही सबग्राफ तयार करण्यासाठी एक संयुक्त केलेल्या वॉलेटचा वापर केला होता, आणि हा वॉलेट हा सबग्राफ प्रतिनिधित्व करणारा NFT मिळवतो, आणि तुम्हाला अपडेट प्रकाशित करण्याची परवानगी देतो. +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -सबग्राफ आर्बिट्रममध्ये हस्तांतरित करताना, तुम्ही वेगळे वॉलेट निवडू शकता जे L2 वर या सबग्राफ NFT चे मालक असेल. +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. आपल्याला "सामान्य" वॉलेट वापरत आहे किंवा MetaMask (एक बाह्यिकपणे मालकीत खाता किंवा EOA, अर्थात स्मार्ट कॉन्ट्रॅक्ट नसलेला वॉलेट), तर ह्या निवडनीय आहे आणि L1 मध्ये असलेल्या समान मालकीचे पत्ते ठेवणे शिफारसले जाते. 
-If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph. +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**तुम्हाला एक वॉलेट पत्ता वापरण्याची महत्त्वाची आहे ज्याच्या तुम्ही नियंत्रण असता आणि त्याने Arbitrum वर व्यवहार करू शकतो. अन्यथा, सबग्राफ गमावला जाईल आणि त्याची पुनर्प्राप्ती केली जाऊ शकणार नाही.** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.** ## हस्तांतरणाची तयारी: काही ETH ब्रिजिंग -सबग्राफला हस्तांतरित करण्यासाठी एक ट्रॅन्झॅक्शन सेंड करण्यात आल्यामुळे ब्रिजद्वारे एक ट्रॅन्झॅक्शन आणि नंतर आर्बिट्रमवर दुसर्या ट्रॅन्झॅक्शन चालवावा लागतो. पहिल्या ट्रॅन्झॅक्शनमध्ये मुख्यनेटवर ETH वापरले जाते, आणि L2 वर संदेश प्राप्त होण्यात आल्यावर गॅस देण्यासाठी काही ETH समाविष्ट केले जाते. हेच गॅस कमी असल्यास, तर तुम्ही ट्रॅन्झॅक्शन पुन्हा प्रयत्न करून लॅटन्सीसाठी त्याच्यावर थेट पैसे द्यायला हवे, त्याच्यामुळे हे "चरण 3: हस्तांतरणाची पुष्टी करणे" असते (खालीलपैकी). ह्या कदाचित्का **तुम्ही हस्तांतरण सुरू केल्याच्या 7 दिवसांच्या आत** हे प्रक्रिया पुर्ण करणे आवश्यक आहे. इतरत्र, दुसऱ्या ट्रॅन्झॅक्शन ("चरण 4: L2 वर हस्तांतरण समाप्त करणे") ही आपल्याला खासगी आर्बिट्रमवर आणण्यात आली आहे. ह्या कारणांसाठी, तुम्हाला किमानपर्यंत काही ETH आवश्यक आहे, एक मल्टीसिग किंवा स्मार्ट कॉन्ट्रॅक्ट खात्याच्या आवश्यक आहे, ETH रोजच्या (EOA) वॉलेटमध्ये असणे आवश्यक आहे, ज्याचा तुम्ही ट्रॅन्झॅक्शन चालवण्यासाठी वापरता, मल्टीसिग वॉलेट स्वत: नसतो. +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. तुम्ही किमानतरी एक्सचेंजेसवर ETH खरेदी करू शकता आणि त्याच्यामध्ये सीधे Arbitrum वर विद्यमान ठेवू शकता, किंवा तुम्ही Arbitrum ब्रिजवापरून ETH मुख्यनेटवरील एक वॉलेटपासून L2 वर पाठवू शकता: bridge.arbitrum.io. आर्बिट्रमवर गॅस फीस खूप कमी आहेत, म्हणजे तुम्हाला फक्त थोडेसे फक्त आवश्यक आहे. तुमच्या ट्रॅन्झॅक्शनसाठी मंजूरी मिळविण्यासाठी तुम्हाला किमान अंतरावर (उदा. 0.01 ETH) सुरुवात करणे शिफारसले जाते. 
-## सबग्राफ ट्रान्सफर टूल शोधत आहे +## Finding the Subgraph Transfer Tool -तुम्ही सबग्राफ स्टुडिओवर तुमच्या सबग्राफचे पेज पाहता तेव्हा तुम्हाला L2 ट्रान्सफर टूल सापडेल: +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![transfer tool](/img/L2-transfer-tool1.png) -हे तयार आहे Explorer वर, आपल्याला जर तुमच्याकडून एक सबग्राफच्या मालकीची वॉलेट असेल आणि Explorer सह कनेक्ट केले तर, आणि त्या सबग्राफच्या पृष्ठावर Explorer वरून मिळवू शकता: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Transferring to L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ If you're using a smart contract wallet, like a multisig (e.g. a Safe), then cho ## पायरी 1: हस्तांतरण सुरू करत आहे -हस्तांतरण सुरू करण्यापूर्वी, तुम्ही L2 वर सबग्राफच्या मालकपत्रक्षयक्षमतेचे निर्णय करावे लागेल (वरील "तुमच्या L2 वॉलेटची निवड" पहा), आणि आपल्याला आर्बिट्रमवर पुर्न ठेवण्यासाठी आधीपासून काही ETH असणे अत्यंत शिफारसले जाते (वरील "हस्तांतरण साठी प्राप्ती करणे: काही ETH हस्तांतरित करणे" पहा). +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). -कृपया लक्षात घ्या की सबग्राफ हस्तांतरित करण्यासाठी सबग्राफवर आपल्याला त्याच्या मालकपत्रक्षयक्षमतेसह अगदीच सिग्नल असावे; जर तुम्हाला सबग्राफवर सिग्नल केलेलं नसलं तर तुम्हाला थोडीसी क्युरेशन वाढवावी (एक थोडीसी असांतर किंवा 1 GRT आढवंच काही आहे). +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -हस्तांतरण साधन उघडण्यात आल्यावर, तुम्ही "प्राप्ति वॉलेट पत्ता" क्षेत्रात L2 वॉलेट पत्ता भरू शकता - **तुम्ही येथे योग्य पत्ता नोंदवला आहे हे खात्री करा**. सबग्राफ हस्तांतरित करण्याच्या वर्तमानीत तुम्ही आपल्या वॉलेटवर ट्रॅन्झॅक्शन सुरू करण्याच्या आवश्यकता आहे (लक्षात घ्या की L2 गॅससाठी काही ETH मूळ आहे); हे हस्तांतरणाच्या प्रक्रियेचे सुरूवात करेल आणि आपल्या L1 सबग्राफला कमी करेल (अद्यतनसाठी "सिग्न. +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). -जर तुम्ही हे कदम पूर्ण करता आहात, नुकसान होऊ नये हे सुनिश्चित करा की 7 दिवसांपेक्षा कमी वेळेत पुन्हा आपल्या क्रियान्वयनाचा तपास करा, किंवा सबग्राफ आणि तुमच्या सिग्नल GRT नष्ट होईल. हे त्याच्या कारणे आहे की आर्बिट्रमवर L1-L2 संदेशाचा कसा काम करतो: ब्रिजद्वारे पाठवलेले संदेश "पुन्हा प्रयत्नीय पर्यायपत्रे" आहेत ज्याचा क्रियान्वयन 7 दिवसांच्या आत अंदाजपत्री केला पाहिजे, आणि सुरुवातीचा क्रियान्वयन, आर्बिट्रमवर गॅस दरात वाढ असल्यास, पुन्हा प्रयत्न करण्याची आवश्यकता असेल. 
+If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Start the transfer to L2](/img/startTransferL2.png) -## पायरी 2: सबग्राफ L2 वर येण्याची वाट पाहत आहे +## Step 2: Waiting for the Subgraph to get to L2 -तुम्ही हस्तांतरण सुरू केल्यानंतर, तुमच्या L1 सबग्राफला L2 वर हस्तांतरित करण्याचे संदेश Arbitrum ब्रिजद्वारे प्रसारित होणे आवश्यक आहे. हे किंवा. 20 मिनिटे लागतात (ब्रिज त्या व्यक्तिमत्वीकृत आहे की L1 मेननेट ब्लॉक जो लेनदार चेन reorgs साठी "सुरक्षित" आहे, त्यातील संदेश किंवा लेनदार चेन reorgs साठी "सुरक्षित" आहे, त्यातील संदेश होऊन जातो). +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). ही प्रतीक्षा वेळ संपल्यानंतर, आर्बिट्रम L2 करारांवर हस्तांतरण स्वयं-अंमलबजावणी करण्याचा प्रयत्न करेल. @@ -80,7 +80,7 @@ If you're using a smart contract wallet, like a multisig (e.g. a Safe), then cho ## पायरी 3: हस्तांतरणाची पुष्टी करणे -अधिकांश प्रकरणात, आपल्याला प्राथमिकपणे संघटित ल2 गॅस असेल, ज्यामुळे सबग्राफला आर्बिट्रम कॉन्ट्रॅक्टवर प्राप्त करण्याच्या ट्रॅन्झॅक्शनची स्वत: क्रियारत झाली पाहिजे. कितीतरी प्रकरणात, आर्बिट्रमवर गॅस दरात वाढ असल्यामुळे ह्या स्वत: क्रियान्वितीत अयशस्वीता आपल्याला काहीतरी किंवा काहीतरी संभावना आहे. ह्या प्रकारे, आपल्या सबग्राफला L2 वर पाठवण्याच्या "पर्यायपत्रास" क्रियारत बसण्यासाठी अपूर्ण ठरेल आणि 7 दिवसांच्या आत पुन्हा प्रयत्न करण्याची आवश्यकता आहे. +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. असे असल्यास, तुम्हाला आर्बिट्रमवर काही ETH असलेले L2 वॉलेट वापरून कनेक्ट करावे लागेल, तुमचे वॉलेट नेटवर्क आर्बिट्रमवर स्विच करा आणि व्यवहाराचा पुन्हा प्रयत्न करण्यासाठी "हस्तांतरण पुष्टी करा" वर क्लिक करा. @@ -88,33 +88,33 @@ If you're using a smart contract wallet, like a multisig (e.g. a Safe), then cho ## पायरी 4: L2 वर हस्तांतरण पूर्ण करणे -आता, आपला सबग्राफ आणि GRT आर्बिट्रमवर प्राप्त झालेले आहेत, परंतु सबग्राफ अद्याप प्रकाशित झालेला नाही. आपल्याला प्राप्ति वॉलेटसाठी निवडलेल्या L2 वॉलेटशी कनेक्ट करण्याची आवश्यकता आहे, आपला वॉलेट नेटवर्क आर्बिट्रमवर स्विच करण्याची आणि "पब्लिश सबग्राफ" वर क्लिक करण्याची आवश्यकता आहे +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." 
-![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -हे सबग्राफ प्रकाशित करेल आहे, त्यामुळे त्याचे सेवन करणारे इंडेक्सर्स आर्बिट्रमवर संचालित आहेत, आणि त्यामुळे ला ट्रान्सफर केलेल्या GRT वापरून संवाद सिग्नल क्युरेशन निर्माणित केले जाईल. +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## पायरी 5: क्वेरी URL अपडेट करत आहे -तुमचा सबग्राफ आर्बिट्रममध्ये यशस्वीरित्या हस्तांतरित केला गेला आहे! सबग्राफची क्वेरी करण्यासाठी, नवीन URL असेल: +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be : `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -लक्षात घ्या की आर्बिट्रमवर सबग्राफचे ID मुख्यनेटवर आपल्याला आहे आणि त्याच्या परिपर्यंत आपल्याला आर्बिट्रमवर आहे, परंतु आपल्याला वेगवेगळा सबग्राफ ID असेल, परंतु तुम्ही सदैव तो Explorer किंवा Studio वर शोधू शकता. उपरोक्त (वरील "सिग्नलसह, आपल्या L1 सबग्राफसह आणि क्वेरी URLसह काय करता येईल" पहा) म्हणजे पुराणे L1 URL थोडेसे वेळाने समर्थित राहील, परंतु आपल्याला सबग्राफ L2 वर सिंक केल्यानंतर आपल्या क्वेरीजला त्वरित नवीन पत्ता देणे शिफारसले जाते. +Note that the Subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## तुमचे क्युरेशन आर्बिट्रम (L2) वर कसे हस्तांतरित करावे -## L2 मध्ये सबग्राफ ट्रान्सफरवरील क्युरेशनचे काय होते हे समजून घेणे +## Understanding what happens to curation on Subgraph transfers to L2 -सबग्राफच्या मालकाने सबग्राफला आर्बिट्रमवर हस्तांतरित केल्यास, सर्व सबग्राफच्या सिग्नलला एकाच वेळी GRT मध्ये रूपांतरित केला जातो. ही "ऑटो-माइग्रेटेड" सिग्नलसाठी लागू होते, अर्थात सबग्राफाच्या कोणत्याही संस्करण किंवा डिप्लॉयमेंटसाठी नसलेली सिग्नल किंवा नवीन संस्करणाच्या आधीच्या सबग्राफच्या आवृत्तीस पुरावीत केली जाते. +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -सिग्नलपासून GRTमध्ये असे रूपांतरण होण्याचे त्याचे आपल्याला उदाहरण दिले आहे ज्याच्यासाठी जर सबग्राफमालक सबग्राफला L1मध्ये पुरावा दिला तर. सबग्राफ विकल्प किंवा हस्तांतरित केला जाता तेव्हा सर्व सिग्नलला समयानुसार "दहन" केला जातो (क्युरेशन बोंडिंग कर्वच्या वापराने) आणि निकाललेल्या GRTने GNS स्मार्ट कॉन्ट्रॅक्टने (जो सबग्राफ अपग्रेड्स आणि ऑटो-माइग्रेटेड सिग्नलच्या व्यवस्थापनासाठी जबाबदार आहे) साठवलेले आहे. प्रत्येक क्युरेटरने त्या सबग्राफसाठी कितीशेअर्स आहेत त्या प्रमाणे त्याच्याकडे गणना असते, आणि त्यामुळे त्याच्या शेअर्सचा GRTचा दावा असतो. +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. 
When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph. -सबग्राफ मालकाशी संबंधित या GRT चा एक अंश सबग्राफसह L2 ला पाठविला जातो. +A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph. -आत्ताच, संशोधित GRTमध्ये कोणतीही अधिक क्वेरी फीस घटना आहे नसून, क्युरेटर्सला आपली GRT वापरण्याची किंवा त्याची L2वर त्याच्या आपल्या वर्णनासाठी हस्तांतरित करण्याची पर्वानगी आहे, ज्याच्या माध्यमातून नवीन क्युरेशन सिग्नल तयार केला जाऊ शकतो. हे करण्यासाठी त्वरित किंवा अनिश्चित काळासाठी कोणतीही जरूरत नाही कारण GRT अनश्वास पाहिजे आणि प्रत्येकाला त्याच्या शेअर्सच्या प्रमाणानुसार एक निश्चित वस्तु मिळणार आहे, कोणत्या वेळीही. +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. ## तुमचे L2 वॉलेट निवडत आहे @@ -130,9 +130,9 @@ If you're using a smart contract wallet, like a multisig (e.g. a Safe), then cho हस्तांतरण सुरू करण्यापूर्वी, तुम्ही त्याच्या L2 वर क्युरेशनचा मालक होणारा पत्ता निवडणे आवश्यक आहे (वरील "तुमच्या L2 वॉलेटची निवड" पाहा), आणि आर्बिट्रमवर संदेशाच्या क्रियान्वयनाचा पुन्हा प्रयत्न केल्यास लागणारे गॅससाठी काही ETH आधीच्या पुलाकीत सांडलेले असले पर्याय सुरुवातीच्या वेळी किंवा पुन्हा प्रयत्नीय पर्यायसाठी. आपल्याला काही एक्सचेंजवरून ETH खरेदी करून त्याची तुमच्या आर्बिट्रमवर स्थानांतरित करून सुरू आहे, किंवा आपल्याला मुख्यनेटवरून L2 वर ETH पाठवण्याच्या आर्बिट्रम ब्रिजचा वापर करून किंवा ETH खरेदी करून L2 वर पाठवण्याच्या कामाकरीत करण्याची शक्यता आहे: [bridge.arbitrum.io](http://bridge.arbitrum.io)- आर्बिट्रमवर गॅस दरात तोंड असल्यामुळे, तुम्हाला केवळ किंवा 0.01 ETH ची किंमत दरम्यानची आवश्यकता असेल. -आपल्याला संवादित केलेल्या सबग्राफ्टला L2 वर हस्तांतरित केले आहे तर, आपल्याला एक संदेश दिलेला जाईल ज्याच्या माध्यमातून Explorer वरून आपल्याला सांगण्यात येईल की आपण हस्तांतरित सबग्राफ्टच्या संवादनी आहात. +If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph. -सबग्राफ्ट पेज पाहताना, आपण संवादनाची पुनर्प्राप्ती किंवा हस्तांतरित करण्याचा निवड करू शकता. "Transfer Signal to Arbitrum" वर क्लिक केल्यास, हस्तांतरण साधने उघडतील. +When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. ![Transfer signal](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ If you're using a smart contract wallet, like a multisig (e.g. a Safe), then cho ## L1 वर तुमचे क्युरेशन मागे घेत आहे -जर आपल्याला आपल्या GRT ला L2 वर पाठवायचं आवडत नसलं तर किंवा आपल्याला GRT ला मॅन्युअली ब्रिज करण्याची प्राथमिकता आहे, तर आपल्याला L1 वरील आपल्या क्युरेटेड GRT ला काढून घ्यायला दिले आहे. सबग्राफच्या पृष्ठाच्या बॅनरवरून "Withdraw Signal" निवडा आणि व्यवस्थापन प्रक्रियेची पुष्टी करा; GRT आपल्या क्युरेटर पत्त्याला पाठविला जाईल. +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. 
On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. From 0a9ab5ca5884fb999d10f2c349bc4c59e7de9b70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:04 -0500 Subject: [PATCH 0336/1789] New translations l2-transfer-tools-guide.mdx (Hindi) --- .../arbitrum/l2-transfer-tools-guide.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/hi/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/hi/archived/arbitrum/l2-transfer-tools-guide.mdx index 22cea8b3617f..1f9088c5439b 100644 --- a/website/src/pages/hi/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/hi/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,53 +6,53 @@ The Graph has made it easy to move to L2 on Arbitrum One. For each protocol part इन टूल्स के बारे में कुछ सामान्य प्रश्नों के उत्तर [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/) में दिए गए हैं। FAQs में इन टूल्स का उपयोग कैसे करें, वे कैसे काम करते हैं, और उनका उपयोग करते समय ध्यान में रखने वाली बातें विस्तृत रूप से समझाई गई हैं। -## अपने सबग्राफ को आर्बिट्रम (L2) में कैसे स्थानांतरित करें +## How to transfer your Subgraph to Arbitrum (L2) -## अपने सबग्राफ़ स्थानांतरित करने के लाभ +## Benefits of transferring your Subgraphs ग्राफ़ का समुदाय और मुख्य डेवलपर पिछले वर्ष से आर्बिट्रम में जाने की तैयारी कर रहे हैं (https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305)। आर्बिट्रम, एक परत 2 या "एल2" ब्लॉकचेन, एथेरियम से सुरक्षा प्राप्त करता है लेकिन काफी कम गैस शुल्क प्रदान करता है। -जब आप अपने सबग्राफ को दी ग्राफ नेटवर्क पर प्रकाशित या अपग्रेड करते हैं, तो आप प्रोटोकॉल पर स्मार्ट कॉन्ट्रैक्ट्स के साथ इंटरैक्ट कर रहे होते हैं और इसके लिए ईथरियम (ETH) का उपयोग करके गैस के लिए भुगतान करना आवश्यक होता है। अपने सबग्राफ को Arbitrum पर स्थानांतरित करके, आपके सबग्राफ के किसी भी भविष्य के अपडेट के लिए गैस शुल्क बहुत कम होगा। कम शुल्कों के साथ, और L2 पर क्यूरेशन बॉन्डिंग कर्व्स फ्लैट होने के कारण, अन्य क्यूरेटर्स को भी आपके सबग्राफ पर क्यूरेट करने में आसानी होगी, जिससे आपके सबग्राफ पर इंडेक्सर्स के लिए पुरस्कार बढ़ेंगे। इस कम लागत वाले वातावरण से इंडेक्सर्स को आपके सबग्राफ को इंडेक्स करने और सेव करने में सस्तापन होगा। आगामी महीनों में Arbitrum पर इंडेक्सिंग पुरस्कार बढ़ जाएगा और ईथिरियम मेननेट पर कम हो जाएगा, इसलिए और भी अधिक इंडेक्सर्स अपने स्टेक को स्थानांतरित करेंगे और उनके संचालन को L2 पर सेटअप करेंगे। +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. 
-## सिग्नल, आपके L1 सबग्राफ और क्वेरी URL के साथ जो होता है, उसे समझने की प्रक्रिया: +## Understanding what happens with signal, your L1 Subgraph and query URLs -Transferring a subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the subgraph to L2. The "transfer" will deprecate the subgraph on mainnet and send the information to re-create the subgraph on L2 using the bridge. It will also include the subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -जब आप सबग्राफ को स्थानांतरित करने का विकल्प चुनते हैं, तो यह सबग्राफ के सभी क्यूरेशन सिग्नल को GRT में रूपांतरित कर देगा। इसका मतलब है कि मुख्यनेट पर सबग्राफ को "विलीन" किया जाएगा। आपके क्यूरेशन के अनुरूप GRT को सबग्राफ के साथ L2 पर भेजा जाएगा, जहां वे आपके प्रतिनिधित्व में सिग्नल निर्माण करने के लिए उपयोग होंगे। +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. -अन्य क्यूरेटर्स का विकल्प होता है कि क्या वे अपने अंशिक GRT को विद्वेष्टित करें या उसे भी L2 पर स्थानांतरित करें ताकि वे उसी सबग्राफ पर सिग्नल निर्मित कर सकें। अगर कोई सबग्राफ का मालिक अपने सबग्राफ को L2 पर स्थानांतरित नहीं करता है और अधिकारिक रूप से उसे एक कॉन्ट्रैक्ट कॉल के माध्यम से विलीन करता है, तो क्यूरेटर्स को सूचित किया जाएगा और उन्हें उनके क्यूरेशन को वापस लेने का अधिकार होगा। +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. -Subgraph को स्थानांतरित करते ही, curation को GRT में रूपांतरित किये जाने के कारण Indexers को subgraph को index करने के लिए अब और rewards नहीं मिलेगा। हालांकि, ऐसे Indexers भी होंगे जो 1) स्थानांतरित subgraphs की सेवा 24 घंटे तक करते रहेंगे और 2) तुरंत L2 पर subgraph को indexing करने की प्रारंभ करेंगे। क्योंकि इन Indexers ने पहले से ही subgraph को indexed किया होता है, इसलिए subgraph को sync करने की प्रतीक्षा करने की आवश्यकता नहीं होगी, और L2 subgraph को तकनीकी रूप से तुरंत carry किया जा सकेगा। +As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately. 
-L2 सबग्राफ के क्वेरी को एक विभिन्न URL पर ( 'arbitrum-gateway.thegraph.com' पर) किया जाना चाहिए, लेकिन L1 URL काम करना जारी रखेगा कम से कम 48 घंटे तक। उसके बाद, L1 गेटवे क्वेरी को L2 गेटवे के लिए आगे प्रेषित करेगा (कुछ समय के लिए), लेकिन इससे लैटेंसी बढ़ सकती है, इसलिए संभावना है कि आपको सभी क्वेरी को नए URL पर जल्द से जल्द स्विच कर लेने की सिफारिश की जाए। +Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. ## अपना L2 वॉलेट चुनना -जब आपने मुख्यनेट पर अपने सबग्राफ को प्रकाशित किया, तो आपने एक कनेक्टेड वॉलेट का उपयोग सबग्राफ बनाने के लिए किया और यह वॉलेट वह NFT स्वामित्व करता है जो इस सबग्राफ का प्रतिनिधित्व करता है और आपको अपडेट प्रकाशित करने की अनुमति देता है। +When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates. -सबग्राफ को Arbitrum पर स्थानांतरित करते समय, आप एक विभिन्न वॉलेट का चयन कर सकते हैं जो L2 पर इस सबग्राफ NFT का स्वामित्व करेगा। +When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2. अगर आप "सामान्य" wallet जैसे MetaMask का उपयोग कर रहे हैं (जिसे बाह्यिक अधिकारित खाता या EOA कहा जाता है, यानी एक wallet जो smart contract नहीं है), तो यह वैकल्पिक है और सिफारिश की जाती है कि आप L1 में के समान मालिक पता बनाए रखें।बटुआ -अगर आप स्मार्ट कॉन्ट्रैक्ट वॉलेट का उपयोग कर रहे हैं, जैसे कि मल्टिसिग (उदाहरणस्वरूप, एक सेफ), तो एक विभिन्न L2 वॉलेट पता चुनना अनिवार्य है, क्योंकि यह बहुत संभावना है कि यह खाता केवल मुख्यनेट पर मौजूद है और आप इस वॉलेट का उपयोग अर्बिट्रम पर लेन-देन करने के लिए नहीं कर सकते हैं। अगर आप स्मार्ट कॉन्ट्रैक्ट वॉलेट या मल्टिसिग का उपयोग करना चाहते हैं, तो अर्बिट्रम पर एक नया वॉलेट बनाएं और उसका पता अपने सबग्राफ के L2 मालिक के रूप में उपयोग करें। +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph. -**यह महत्वपूर्ण है कि आप एक वॉलेट पता का उपयोग करें जिस पर आपका नियंत्रण है, और जिससे आप अर्बिट्रम पर लेन-देन कर सकते हैं। अन्यथा, सबग्राफ हानि हो जाएगा और उसे पुनः प्राप्त नहीं किया जा सकता।** +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. 
Otherwise, the Subgraph will be lost and cannot be recovered.** ## स्थानांतरण के लिए तैयारी: कुछ ETH को ब्रिज करना -सबग्राफ को स्थानांतरित करने में एक लेन-देन को ब्रिज के माध्यम से भेजना शामिल है, और फिर अर्बिट्रम पर एक और लेन-देन को प्रारंभ करना। पहली लेन-देन मुख्यनेट पर ETH का उपयोग करता है, और जब संदेश L2 पर प्राप्त होता है, तो गैस के भुगतान के लिए कुछ ETH को शामिल करता है। हालांकि, अगर यह गैस पर्याप्त नहीं होता है, तो आपको लेन-देन को पुनः प्रयास करना होगा और गैस के लिए सीधे L2 पर भुगतान करना होगा (यह "चरण 3: स्थानांतरण की पुष्टि करना" है, नीचे दिए गए हैं)। यह कदम **स्थानांतरण की प्रारंभिक करने के 7 दिनों के भीतर कार्यान्वित किया जाना चाहिए।** इसके अलावा, दूसरी लेन-देन ("चरण 4: L2 पर स्थानांतरण को समाप्त करना") को सीधे अर्बिट्रम पर किया जाएगा। इन कारणों से, आपको किसी एक Arbitrum वॉलेट पर कुछ ETH की आवश्यकता होगी। यदि आप मल्टिसिग या स्मार्ट कॉन्ट्रैक्ट खाता का उपयोग कर रहे हैं, तो ETH को उन्हीं सामान्य (EOA) वॉलेट में होना चाहिए जिसका आप लेन-देन कार्यान्वित करने के लिए उपयोग कर रहे हैं, मल्टिसिग वॉलेट में नहीं। +Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. 
आप कुछ एक्सचेंजों पर ETH खरीद सकते हैं और उसे सीधे अर्बिट्रम में विद्वेष्टित कर सकते हैं, या आप अर्बिट्रम ब्रिज का उपयोग करके ETH को मुख्यनेट वॉलेट से L2 में भेज सकते हैं: [bridge.arbitrum.io](http://bridge.arbitrum.io)। क्योंकि अर्बिट्रम पर गैस शुल्क कम होते हैं, आपको केवल थोड़ी सी राशि की आवश्यकता होनी चाहिए। यह सिफारिश की जाती है कि आप अपने लेन-देन को स्वीकृति प्राप्त करने के लिए कम थ्रेशहोल्ड (उदाहरणस्वरूप 0.01 ETH) से प्रारंभ करें। -## सबग्राफ ट्रांसफर टूल ढूँढना +## Finding the Subgraph Transfer Tool -आप सबग्राफ स्टूडियो पर अपने सबग्राफ के पेज को देखते समय L2 ट्रांसफर टूल पा सकते हैं: +You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio: ![transfer tool](/img/L2-transfer-tool1.png) -यह भी उपलब्ध है एक्सप्लोरर पर अगर आप ऐसे वॉलेट से कनेक्ट हो जाते हैं जिसका सबग्राफ का स्वामित्व है, और उस सबग्राफ के पेज पर एक्सप्लोरर पर: +It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer: ![Transferring to L2](/img/transferToL2.png) @@ -60,19 +60,19 @@ L2 सबग्राफ के क्वेरी को एक विभिन ## चरण 1: स्थानांतरण की प्रारंभिक कदम -स्थानांतरण की प्रारंभिक करने से पहले, आपको तय करना होगा कि L2 पर सबग्राफ का स्वामित्व किस पते पर होगा (ऊपर "अपने L2 वॉलेट का चयन करना" देखें), और यह मजबूती से सिफारिश की जाती है कि अर्बिट्रम पर गैस के लिए कुछ ETH ब्रिज कर दिया गया हो (ऊपर "स्थानांतरण की तैयारी: कुछ ETH को ब्रिज करना" देखें)। +Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). -यह भी ध्यान दें कि सबग्राफ को स्थानांतरित करने के लिए सबग्राफ के साथ एक ही खाते में कोई भी सिग्नल की गई राशि होनी चाहिए; अगर आपने सबग्राफ पर सिग्नल नहीं किया है तो आपको थोड़ी सी क्यूरेशन जोड़नी होगी (एक छोटी राशि जैसे 1 GRT जोड़ना काफी होगा)। +Also please note transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). -स्थानांतरण टूल खोलने के बाद, आपको "प्राप्ति वॉलेट पता" फ़ील्ड में L2 वॉलेट पता दर्ज करने की अनुमति मिलेगी - **सुनिश्चित करें कि आपने यहाँ सही पता डाला है।** "सबग्राफ स्थानांतरित करें" पर क्लिक करने से आपको अपने वॉलेट पर लेन-देन कार्यान्वित करने के लिए प्रोम्प्ट किया जाएगा (ध्यान दें कि L2 गैस के भुगतान के लिए कुछ ETH मान शामिल है)। इससे स्थानांतरण प्रारंभ होगा और आपका L1 सबग्राफ विलीन हो जाएगा (इसके पीछे के प्रक्रिया के बारे में अधिक जानकारी के लिए "सिग्नल, आपके L1 सबग्राफ और क्वेरी URL के साथ क्या होता है की समझ" देखें)। +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes). 
-इस कदम को कार्यान्वित करते समय, **सुनिश्चित करें कि आप 7 दिन से कम समय में चरण 3 को पूरा करने जाते हैं, अन्यथा सबग्राफ और आपका सिग्नल GRT हानि हो सकते हैं।** यह अर्बिट्रम पर L1-L2 संदेशिकरण कैसे काम करता है के कारण है: ब्रिज के माध्यम से भेजे गए संदेश "पुनः प्रयासनीय टिकट" होते हैं जिन्हें 7 दिन के भीतर कार्यान्वित किया जाना चाहिए, और पहले कार्यान्वयन में अगर अर्बिट्रम पर गैस की मूल्य में वृद्धि होती है तो पुनः प्रयास की आवश्यकता हो सकती है। +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. ![Start the transfer to L2](/img/startTransferL2.png) -## चरण 2: सबग्राफ को L2 तक पहुँचने की प्रतीक्षा करना +## Step 2: Waiting for the Subgraph to get to L2 -जब आप स्थानांतरण की प्रारंभिक करते हैं, तो आपके L1 सबग्राफ को L2 भेजने वाले संदेश को अर्बिट्रम ब्रिज के माध्यम से प्रसारित होना चाहिए। यह लगभग 20 मिनट लगता है (ब्रिज मुख्यनेट ब्लॉक को "सुरक्षित" बनाने के लिए प्रत्येक लेनदेन के मुख्यनेट ब्लॉक के लिए प्रतीक्षा करता है, जिसमें संभावित चेन रीआर्ग से बचाया जा सकता है)। +After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). इस प्रतीक्षा काल के बाद, अर्बिट्रम ल2 अनुबंधों पर स्थानांतरण को स्वतः कार्यान्वित करने का प्रयास करेगा। @@ -80,7 +80,7 @@ L2 सबग्राफ के क्वेरी को एक विभिन ## चरण 3: स्थानांतरण की पुष्टि करना -अधिकांश मामलों में, यह कदम स्वचालित रूप से क्रियान्वित हो जाएगा क्योंकि स्टेप 1 में शामिल एल2 गैस काफी होता है ताकि आर्बिट्रम कॉन्ट्रैक्ट पर सबग्राफ प्राप्त करने वाले लेनदेन को क्रियान्वित किया जा सके। हालांकि, कुछ मामलों में, यह संभावित है कि आर्बिट्रम पर गैस मूल्यों में एक उछाल के कारण यह स्वचालित क्रियान्वित होने में विफल हो सकता है। इस मामले में, जो "टिकट" आपके सबग्राफ को एल2 पर भेजता है, वह लंबित हो जाएगा और 7 दिनों के भीतर पुनः प्रयास की आवश्यकता होगी। +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days. यदि यह मामला आपके साथ होता है, तो आपको ऐसे L2 वॉलेट का उपयोग करके कनेक्ट करना होगा जिसमें आर्बिट्रम पर कुछ ETH हो, अपनी वॉलेट नेटवर्क को आर्बिट्रम पर स्विच करना होगा, और "पुनः प्रायोग की पुष्टि करें" पर क्लिक करके लेन-देन को पुनः प्रयास करने के लिए। @@ -88,33 +88,33 @@ L2 सबग्राफ के क्वेरी को एक विभिन ## चरण 4: L2 पर स्थानांतरण समाप्त करना -इस बिंदु पर, आपका सबग्राफ और GRT आर्बिट्रम पर प्राप्त हो चुके हैं, लेकिन सबग्राफ अबतक प्रकाशित नहीं हुआ है। आपको वह एल2 वॉलेट का उपयोग करके कनेक्ट करना होगा जिसे आपने प्राप्ति वॉलेट के रूप में चुना है, अपने वॉलेट नेटवर्क को आर्बिट्रम पर स्विच करना होगा, और "पब्लिश सबग्राफ" पर क्लिक करना होगा। +At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." 
-![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -इससे सबग्राफ प्रकाशित हो जाएगा ताकि Arbitrum पर काम करने वाले इंडेक्सर उसकी सेवा करना शुरू कर सकें। यह भी उसी GRT का करेशन सिग्नल मिन्ट करेगा जो L1 से स्थानांतरित हुए थे। +This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. ## Step 5: query Step 5 को Update करना -आपकी सबग्राफ सफलतापूर्वक Arbitrum में स्थानांतरित की गई है! सबग्राफ का प्रश्न करने के लिए, नया URL होगा: +Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be : `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -ध्यान दें कि आर्बिट्रम पर सबग्राफ आईडी मुख्यनेट पर जितना भिन्न होगा, लेकिन आप हमेशा इसे एक्सप्लोरर या स्टूडियो पर ढूंढ सकते हैं। जैसा कि पहले उल्लिखित किया गया है ("सिग्नल, आपके L1 सबग्राफ और क्वेरी URL के साथ क्या होता है" देखें), पुराना L1 URL कुछ समय तक समर्थित किया जाएगा, लेकिन आपको सबग्राफ को L2 पर सिंक होने के बाद नए पते पर अपने क्वेरी को स्विच कर देना चाहिए। +Note that the Subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2. ## अपने क्यूरेशन को आर्बिट्रम (L2) में कैसे स्थानांतरित करें -## यह समझना कि एल2 में सबग्राफ़ स्थानांतरण पर क्यूरेशन का क्या होता है +## Understanding what happens to curation on Subgraph transfers to L2 -जब कोई सबग्राफ के मालिक सबग्राफ को आर्बिट्रम पर ट्रांसफर करते हैं, तो सबग्राफ की सभी सिग्नल को एक साथ GRT में रूपांतरित किया जाता है। यह "ऑटो-माइग्रेटेड" सिग्नल के लिए भी लागू होता है, अर्थात्, सिग्नल जो सबग्राफ के किसी वर्शन या डिप्लॉयमेंट के लिए विशिष्ट नहीं है, लेकिन जो सबग्राफ के नवीनतम संस्करण का पालन करते हैं। +When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph. -सिग्नल से GRT में इस परिवर्तन को वही होता है जो होता है अगर सबग्राफ के मालिक ने L1 में सबग्राफ को विच्छेद किया होता। जब सबग्राफ को विच्छेदित या स्थानांतरित किया जाता है, तो सभी क्यूरेशन सिग्नल को समयानुसार "जलाया" जाता है (क्यूरेशन बॉन्डिंग कर्व का उपयोग करके) और परिणित GRT को GNS स्मार्ट कॉन्ट्रैक्ट द्वारा रखा जाता है (जो सबग्राफ अपग्रेड और ऑटो-माइग्रेटेड सिग्नल को संभालता है)। इस प्रकार, उस सबग्राफ के प्रत्येक क्यूरेटर के पास उस GRT का दावा होता है जो उनके लिए उपग्रहानुशासित था। +This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). 
Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph. -इन जीआरटी की एक भाग, जो सबग्राफ के मालिक के संवर्ग के साथ मेल खाते हैं, वह एल2 में भेजे जाते हैं। +A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph. -इस बिंदु पर, क्यूरेटेड GRT को अब और क्वेरी शुल्क नहीं बढ़ेंगे, इसलिए क्यूरेटर्स अपने GRT को वापस निकालने का चयन कर सकते हैं या उसे L2 पर उसी सबग्राफ में ट्रांसफर कर सकते हैं, जहां उसे नई क्यूरेशन सिग्नल बनाने के लिए उपयोग किया जा सकता है। इसे करने के लिए कोई जल्दी नहीं है क्योंकि GRT को अनिश्चितकाल तक रखा जा सकता है और हर कोई अपने हिस्से के अनुपात में एक निश्चित राशि प्राप्त करता है, चाहे वो जब भी करे। +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. ## अपना L2 वॉलेट चुनना @@ -130,9 +130,9 @@ L2 सबग्राफ के क्वेरी को एक विभिन ट्रांसफर शुरू करने से पहले, आपको निर्णय लेना होगा कि L2 पर क्यूरेशन किस पते का स्वामित्व करेगा (ऊपर "अपने L2 वॉलेट का चयन करना" देखें), और संदेश को L2 पर पुनः क्रियान्वित करने की आवश्यकता पड़ने पर आपके पास गैस के लिए पहले से ही कुछ ETH होने की सिफारिश की जाती है। आप कुछ एक्सचेंजों पर ETH खरीद सकते हैं और उसे सीधे Arbitrum पर निकाल सकते हैं, या आप मुख्यनेट वॉलेट से L2 में ETH भेजने के लिए आर्बिट्रम ब्रिज का उपयोग कर सकते हैं: [bridge.arbitrum.io](http://bridge.arbitrum.io) - क्योंकि आर्बिट्रम पर गैस शुल्क इतने कम होते हैं, तो आपको केवल थोड़ी सी राशि की आवश्यकता होगी, जैसे कि 0.01 ETH शायद पर्याप्त हो। -अगर वह सबग्राफ जिसे आप करेशन कर रहे हैं L2 पर स्थानांतरित किया गया है, तो आपको एक संदेश दिखाई देगा जो आपको एक स्थानांतरित सबग्राफ करेशन की जानकारी देगा। +If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph. -सबग्राफ पेज को देखते समय, आपको करेशन को वापस लेने या स्थानांतरित करने का चयन करने का विकल्प होता है। "Transfer Signal to Arbitrum" पर क्लिक करने से स्थानांतरण उपकरण खुल जाता है। +When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. ![Transfer signal](/img/transferSignalL2TransferTools.png) @@ -162,4 +162,4 @@ L2 सबग्राफ के क्वेरी को एक विभिन ## L1 पर अपना कार्यकाल वापस ले रहा हूँ -अगर आप चाहते हैं कि आप अपने GRT को L2 पर नहीं भेजें, या फिर आप पसंद करते हैं कि GRT को मैन्युअल रूप से ब्रिज करें, तो आप अपने क्यूरेटेड GRT को L1 पर निकाल सकते हैं। सबग्राफ पृष्ठ पर बैनर पर, "सिग्नल निकालें" चुनें और लेनदेन की पुष्टि करें; GRT आपके क्यूरेटर पते पर भेज दिया जाएगा। +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. 
From 2330cf6be049021a68459336dfc8477441fa8a2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:05 -0500 Subject: [PATCH 0337/1789] New translations l2-transfer-tools-guide.mdx (Swahili) --- .../arbitrum/l2-transfer-tools-guide.mdx | 165 ++++++++++++++++++ 1 file changed, 165 insertions(+) create mode 100644 website/src/pages/sw/archived/arbitrum/l2-transfer-tools-guide.mdx diff --git a/website/src/pages/sw/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/sw/archived/arbitrum/l2-transfer-tools-guide.mdx new file mode 100644 index 000000000000..4a34da9bad0e --- /dev/null +++ b/website/src/pages/sw/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -0,0 +1,165 @@ +--- +title: L2 Transfer Tools Guide +--- + +The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. + +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. + +## How to transfer your Subgraph to Arbitrum (L2) + + + +## Benefits of transferring your Subgraphs + +The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. + +When you publish or upgrade your Subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your Subgraphs to Arbitrum, any future updates to your Subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your Subgraph, increasing the rewards for Indexers on your Subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your Subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. + +## Understanding what happens with signal, your L1 Subgraph and query URLs + +Transferring a Subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the Subgraph to L2. The "transfer" will deprecate the Subgraph on mainnet and send the information to re-create the Subgraph on L2 using the bridge. It will also include the Subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. + +When you choose to transfer the Subgraph, this will convert all of the Subgraph's curation signal to GRT. This is equivalent to "deprecating" the Subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the Subgraph, where they will be used to mint signal on your behalf. + +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same Subgraph. 
If a Subgraph owner does not transfer their Subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation.
+
+As soon as the Subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the Subgraph. However, there will be Indexers that will 1) keep serving transferred Subgraphs for 24 hours, and 2) immediately start indexing the Subgraph on L2. Since these Indexers already have the Subgraph indexed, there should be no need to wait for the Subgraph to sync, and it will be possible to query the L2 Subgraph almost immediately.
+
+Queries to the L2 Subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible.
+
+## Choosing your L2 wallet
+
+When you published your Subgraph on mainnet, you used a connected wallet to create the Subgraph, and this wallet owns the NFT that represents this Subgraph and allows you to publish updates.
+
+When transferring the Subgraph to Arbitrum, you can choose a different wallet that will own this Subgraph NFT on L2.
+
+If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same owner address as in L1.
+
+If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your Subgraph.
+
+**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the Subgraph will be lost and cannot be recovered.**
+
+## Preparing for the transfer: bridging some ETH
+
+Transferring the Subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself.
+
+You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Since gas fees on Arbitrum are lower, you should only need a small amount. It is recommended that you start at a low threshold (e.g. 0.01 ETH) for your transaction to be approved.
+
+## Finding the Subgraph Transfer Tool
+
+You can find the L2 Transfer Tool when you're looking at your Subgraph's page on Subgraph Studio:
+
+![transfer tool](/img/L2-transfer-tool1.png)
+
+It is also available on Explorer if you're connected with the wallet that owns a Subgraph and on that Subgraph's page on Explorer:
+
+![Transferring to L2](/img/transferToL2.png)
+
+Clicking on the Transfer to L2 button will open the transfer tool where you can start the transfer process.
+
+## Step 1: Starting the transfer
+
+Before starting the transfer, you must decide which address will own the Subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommended to have some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above).
+
+Also please note that transferring the Subgraph requires having a nonzero amount of signal on the Subgraph with the same account that owns the Subgraph; if you haven't signaled on the Subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice).
+
+After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 Subgraph (see "Understanding what happens with signal, your L1 Subgraph and query URLs" above for more details on what goes on behind the scenes).
+
+If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the Subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum.
+
+![Start the transfer to L2](/img/startTransferL2.png)
+
+## Step 2: Waiting for the Subgraph to get to L2
+
+After you start the transfer, the message that sends your L1 Subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs).
+
+Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts.
+
+![Wait screen](/img/screenshotOfWaitScreenL2.png)
+
+## Step 3: Confirming the transfer
+
+In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the Subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your Subgraph to L2 will be pending and require a retry within 7 days.
+
+If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction.
+
+![Confirm the transfer to L2](/img/confirmTransferToL2.png)
+
+## Step 4: Finishing the transfer on L2
+
+At this point, your Subgraph and GRT have been received on Arbitrum, but the Subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph."
+![Publish the Subgraph](/img/publishSubgraphL2TransferTools.png)
+
+![Wait for the Subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png)
+
+This will publish the Subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1.
+
+## Step 5: Updating the query URL
+
+Your Subgraph has been successfully transferred to Arbitrum! To query the Subgraph, the new URL will be:
+
+`https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]`
+
+Note that the Subgraph ID on Arbitrum will be different from the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 Subgraph and query URLs"), the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the Subgraph has been synced on L2.
+
+## How to transfer your curation to Arbitrum (L2)
+
+## Understanding what happens to curation on Subgraph transfers to L2
+
+When the owner of a Subgraph transfers a Subgraph to Arbitrum, all of the Subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a Subgraph version or deployment but that follows the latest version of a Subgraph.
+
+This conversion from signal to GRT is the same as what would happen if the Subgraph owner deprecated the Subgraph in L1. When the Subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles Subgraph upgrades and auto-migrated signal). Each Curator on that Subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the Subgraph.
+
+A fraction of these GRT corresponding to the Subgraph owner is sent to L2 together with the Subgraph.
+
+At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same Subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be held indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it.
+
+## Choosing your L2 wallet
+
+If you decide to transfer your curated GRT to L2, you can choose a different wallet that will own the curation signal on L2.
+
+If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same Curator address as in L1.
+
+If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 receiving wallet address.
+
+**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum, as otherwise the curation will be lost and cannot be recovered.**
+
+## Sending curation to L2: Step 1
+
+Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended to have some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough.
+
+If a Subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred Subgraph.
+
+When looking at the Subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool.
+
+![Transfer signal](/img/transferSignalL2TransferTools.png)
+
+After opening the Transfer Tool, you may be prompted to add some ETH to your wallet if you don't have any. Then you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Signal will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer.
+
+If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retryable tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum.
+
+## Sending curation to L2: Step 2
+
+Starting the transfer:
+
+![Send signal to L2](/img/sendingCurationToL2Step2First.png)
+
+After you start the transfer, the message that sends your L1 curation to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs).
+
+Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts.
+
+![Sending curation signal to L2](/img/sendingCurationToL2Step2Second.png)
+
+## Sending curation to L2: Step 3
+
+In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the curation on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your curation to L2 will be pending and require a retry within 7 days.
+
+If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction.
+
+![Send signal to L2](/img/L2TransferToolsFinalCurationImage.png)
+
+## Withdrawing your curation on L1
+
+If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1.
On the banner on the Subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. From 3ba8b0f534077269a39c295d2189c6210b36fc91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:06 -0500 Subject: [PATCH 0338/1789] New translations sunrise.mdx (Romanian) --- website/src/pages/ro/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/ro/archived/sunrise.mdx b/website/src/pages/ro/archived/sunrise.mdx index eb18a93c506c..71262f22e7d8 100644 --- a/website/src/pages/ro/archived/sunrise.mdx +++ b/website/src/pages/ro/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## What was the Sunrise of Decentralized Data? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### What happened to the hosted service? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Was Subgraph Studio impacted by this upgrade? No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## About the Upgrade Indexer > The upgrade Indexer is currently active. 
-The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### What does the upgrade Indexer do? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Why is Edge & Node running the upgrade Indexer? -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### What does the upgrade indexer mean for existing Indexers? Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### What does this mean for Delegators? -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. 
+No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ The upgrade Indexer enables chains on the network that were previously only supp The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. From 4433873f4846a154e7aac3f6ce62b289622b30c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:07 -0500 Subject: [PATCH 0339/1789] New translations sunrise.mdx (French) --- website/src/pages/fr/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/fr/archived/sunrise.mdx b/website/src/pages/fr/archived/sunrise.mdx index 575d138c0f55..dc20e31aee77 100644 --- a/website/src/pages/fr/archived/sunrise.mdx +++ b/website/src/pages/fr/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## What was the Sunrise of Decentralized Data? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. 
+The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### What happened to the hosted service? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Was Subgraph Studio impacted by this upgrade? No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## À propos de l'indexeur de mise à niveau > The upgrade Indexer is currently active. -The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### What does the upgrade Indexer do? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). 
-- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Pourquoi Edge & Node exécutent-ils l'indexeur de mise à niveau ? -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### Que signifie la mise à niveau de l'indexeur pour les indexeurs existants ? Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### What does this mean for Delegators? -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. 
+Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ L'indexeur de mise à niveau active les chaînes sur le réseau qui n'étaient a The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. From 8b8b41a6c9ab4d63ef8bb902ea6399fe7ea0472a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:08 -0500 Subject: [PATCH 0340/1789] New translations sunrise.mdx (Spanish) --- website/src/pages/es/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/es/archived/sunrise.mdx b/website/src/pages/es/archived/sunrise.mdx index eb18a93c506c..71262f22e7d8 100644 --- a/website/src/pages/es/archived/sunrise.mdx +++ b/website/src/pages/es/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## What was the Sunrise of Decentralized Data? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### What happened to the hosted service? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. 
+During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Was Subgraph Studio impacted by this upgrade? No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## About the Upgrade Indexer > The upgrade Indexer is currently active. -The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### What does the upgrade Indexer do? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Why is Edge & Node running the upgrade Indexer? -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### What does the upgrade indexer mean for existing Indexers? Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. 
As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### What does this mean for Delegators? -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ The upgrade Indexer enables chains on the network that were previously only supp The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. 
+Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. From d4d2c56dce5226565ff3b65c698a81e9fa9301d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:09 -0500 Subject: [PATCH 0341/1789] New translations sunrise.mdx (Arabic) --- website/src/pages/ar/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/ar/archived/sunrise.mdx b/website/src/pages/ar/archived/sunrise.mdx index eb18a93c506c..71262f22e7d8 100644 --- a/website/src/pages/ar/archived/sunrise.mdx +++ b/website/src/pages/ar/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## What was the Sunrise of Decentralized Data? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### What happened to the hosted service? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Was Subgraph Studio impacted by this upgrade? No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. 
As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## About the Upgrade Indexer > The upgrade Indexer is currently active. -The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### What does the upgrade Indexer do? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Why is Edge & Node running the upgrade Indexer? -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### What does the upgrade indexer mean for existing Indexers? Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### What does this mean for Delegators? -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. 
As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ The upgrade Indexer enables chains on the network that were previously only supp The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. 
From 1a020c7065b392821d6725f4ed189cf4db2ecc2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:10 -0500 Subject: [PATCH 0342/1789] New translations sunrise.mdx (Czech) --- website/src/pages/cs/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/cs/archived/sunrise.mdx b/website/src/pages/cs/archived/sunrise.mdx index 71b86ac159ff..52e8c90d7708 100644 --- a/website/src/pages/cs/archived/sunrise.mdx +++ b/website/src/pages/cs/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## Jaký byl úsvit decentralizovaných dat? -Úsvit decentralizovaných dat byla iniciativa, kterou vedla společnost Edge & Node. Tato iniciativa umožnila vývojářům podgrafů bezproblémově přejít na decentralizovanou síť Graf. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### Co se stalo s hostovanou službou? -Koncové body dotazů hostované služby již nejsou k dispozici a vývojáři nemohou v hostované službě nasadit nové podgrafy. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -Během procesu aktualizace mohli vlastníci podgrafů hostovaných služeb aktualizovat své podgrafy na síť Graf. Vývojáři navíc mohli nárokovat automatickou aktualizaci podgrafů. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Měla tato aktualizace vliv na Podgraf Studio? Ne, na Podgraf Studio neměl Sunrise vliv. Podgrafy byly okamžitě k dispozici pro dotazování, a to díky aktualizačnímu indexeru, který využívá stejnou infrastrukturu jako hostovaná služba. -### Proč byly podgrafy zveřejněny na Arbitrum, začalo indexovat jinou síť? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## O Upgrade Indexer > Aktualizace Indexer je v současné době aktivní. -Upgrade Indexer byl implementován za účelem zlepšení zkušeností s upgradem podgrafů z hostované služby do sit' Graf a podpory nových verzí stávajících podgrafů, které dosud nebyly indexovány. 
+The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### Co dělá upgrade Indexer? -- Zavádí řetězce, které ještě nezískaly odměnu za indexaci v síti Graf, a zajišťuje, aby byl po zveřejnění podgrafu co nejrychleji k dispozici indexátor pro obsluhu dotazů. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexátoři, kteří provozují upgrade indexátoru, tak činí jako veřejnou službu pro podporu nových podgrafů a dalších řetězců, kterým chybí indexační odměny, než je Rada grafů schválí. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Proč Edge & Node spouští aktualizaci Indexer? -Edge & Node historicky udržovaly hostovanou službu, a proto již mají synchronizovaná data pro podgrafy hostované služby. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### Co znamená upgrade indexeru pro stávající indexery? Řetězce, které byly dříve podporovány pouze v hostované službě, byly vývojářům zpřístupněny v síti Graf nejprve bez odměn za indexování. -Tato akce však uvolnila poplatky za dotazy pro všechny zájemce o indexování a zvýšila počet podgrafů zveřejněných v síti Graf. V důsledku toho mají indexátoři více příležitostí indexovat a obsluhovat tyto podgrafy výměnou za poplatky za dotazy, a to ještě předtím, než jsou odměny za indexování pro řetězec povoleny. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -Upgrade Indexer také poskytuje komunitě Indexer informace o potenciální poptávce po podgrafech a nových řetězcích v síti grafů. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### Co to znamená pro delegáti? -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. 
+It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ Aktualizace Indexeru umožňuje podporu blockchainů v síti, které byly dřív The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. From bb976d72f6aca89c23f972d5bf21d81eb28fbed5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:11 -0500 Subject: [PATCH 0343/1789] New translations sunrise.mdx (German) --- website/src/pages/de/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/de/archived/sunrise.mdx b/website/src/pages/de/archived/sunrise.mdx index 398fe1ca72f7..375de947caa9 100644 --- a/website/src/pages/de/archived/sunrise.mdx +++ b/website/src/pages/de/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## Was war die Sunrise der dezentralisierten Daten? -Die Sunrise of Decentralized Data war eine Initiative, die von Edge & Node angeführt wurde. Diese Initiative ermöglichte es Subgraph-Entwicklern, nahtlos auf das dezentrale Netzwerk von The Graph zu wechseln. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. 
-Dieser Plan stützt sich auf frühere Entwicklungen des Graph-Ökosystems, einschließlich eines aktualisierten Indexers, der Abfragen auf neu veröffentlichte Subgraphen ermöglicht. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### Was ist mit dem gehosteten Dienst passiert? -Die Query-Endpunkte des gehosteten Dienstes sind nicht mehr verfügbar, und Entwickler können keine neuen Subgraphen für den gehosteten Dienst bereitstellen. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -Während des Upgrade-Prozesses konnten die Besitzer von gehosteten Service-Subgraphen ihre Subgraphen auf The Graph Network aktualisieren. Außerdem konnten Entwickler automatisch aktualisierte Subgraphen beanspruchen. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Wurde Subgraph Studio durch dieses Upgrade beeinträchtigt? Nein, Subgraph Studio wurde durch Sunrise nicht beeinträchtigt. Subgraphen standen sofort für Abfragen zur Verfügung, angetrieben durch den Upgrade Indexer, der die gleiche Infrastruktur wie der gehostete Dienst nutzt. -### Warum wurden Subgraphen auf Arbitrum veröffentlicht, hat es begonnen, ein anderes Netzwerk zu indizieren? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -Das Graph Network wurde zunächst auf dem Ethereum Mainnet eingesetzt, wurde aber später auf Arbitrum One verschoben, um die Gaskosten für alle Nutzer zu senken. Infolgedessen werden alle neuen Subgraphen im Graph Network auf Arbitrum veröffentlicht, damit Indexer sie unterstützen können. Arbitrum ist das Netzwerk, in dem Subgraphen veröffentlicht werden, aber Subgraphen können jedes der [unterstützten Netzwerke](/supported-networks/) indizieren. +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## Über den Upgrade Indexer > Der Upgrade Indexer ist derzeit aktiv. -Der Upgrade Indexer wurde implementiert, um das Upgrade von Subgraphen vom gehosteten Dienst zu The Graph Network zu verbessern und neue Versionen von bestehenden Subgraphen, die noch nicht indiziert wurden, zu unterstützen. +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### Was macht der Upgrade Indexer? -- Er bootet Ketten, die noch keinen Reward für die Indizierung im The Graph Network erhalten haben, und stellt sicher, dass ein Indexer zur Verfügung steht, um Anfragen so schnell wie möglich zu bedienen, nachdem ein Subgraph veröffentlicht wurde. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - Er unterstützt Ketten, die bisher nur über den gehosteten Dienst verfügbar waren. Eine umfassende Liste der unterstützten Ketten finden Sie [hier](/supported-networks/). 
-- Indexer, die einen Upgrade Indexer betreiben, tun dies als öffentlichen Dienst, um neue Subgraphen und zusätzliche Ketten zu unterstützen, für die es noch keine Rewards gibt, bevor sie vom The Graph Council genehmigt werden. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Warum führen Edge & Node den Upgrade Indexer aus? -Edge & Node haben in der Vergangenheit den gehosteten Dienst gewartet und verfügen daher bereits über synchronisierte Daten für Subgraphen des gehosteten Dienstes. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### Was bedeutet der Upgrade Indexer für bestehende Indexer? Ketten, die bisher nur vom gehosteten Dienst unterstützt wurden, wurden den Entwicklern auf The Graph Network zunächst ohne Rewards zur Verfügung gestellt. -Durch diese Aktion wurden jedoch Abfragegebühren für jeden interessierten Indexer freigeschaltet und die Anzahl der im Graph Network veröffentlichten Subgraphen erhöht. Infolgedessen haben Indexer mehr Möglichkeiten, diese Subgraphen im Austausch gegen Abfragegebühren zu indizieren und zu bedienen, noch bevor Indexing Rewards für eine Kette aktiviert sind. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -Der Upgrade-Indexer versorgt die Indexer-Community auch mit Informationen über die potenzielle Nachfrage nach Subgraphen und neuen Ketten im The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### Was bedeutet das für die Delegatoren? -Der Upgrade-Indexer bietet eine große Chance für Delegatoren. Da mehr Subgraphen vom gehosteten Dienst auf The Graph Network umgestellt werden können, profitieren die Delegatoren von der erhöhten Netzwerkaktivität. +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Konkurriert der aktualisierte Indexer mit bestehenden Indexern um Rewards? -Nein, der Upgrade-Indexer weist nur den Mindestbetrag pro Subgraph zu und sammelt keine Rewards für die Indizierung. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -Er arbeitet "nach Bedarf" und dient als Ausweichlösung, bis mindestens drei andere Indexer im Netz für die jeweiligen Ketten und Subgraphen eine ausreichende Dienstqualität erreicht haben. +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### Wie wirkt sich das für die Entwickler von Subgraphen aus? +### How does this affect Subgraph developers? -Entwickler von Subgraphen können ihre Subgraphen auf The Graph Network fast sofort nach dem Upgrade vom gehosteten Dienst oder nach dem [Veröffentlichen aus Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/) abfragen, da keine Vorlaufzeit für die Indizierung erforderlich war. 
Bitte beachten Sie, dass das [Erstellen eines Subgraphen](/developing/creating-a-subgraph/) von diesem Upgrade nicht betroffen ist. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### Welchen Nutzen hat der Upgrade-Indexer für die Datenkonsumenten? @@ -71,10 +71,10 @@ Der Upgrade-Indexer ermöglicht Verkettungen im Netz, die bisher nur vom gehoste Der Upgrade-Indexer berechnet Abfragen zum Marktpreis, um den Markt für Abfragegebühren nicht zu beeinflussen. -### Wann wird der Upgrade-Indexer aufhören, einen Subgraphen zu unterstützen? +### When will the upgrade Indexer stop supporting a Subgraph? -Der Upgrade-Indexer unterstützt einen Subgraphen so lange, bis mindestens 3 andere Indexer erfolgreich und konsistent die an ihn gerichteten Abfragen bedienen. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Außerdem unterstützt der Upgrade-Indexer einen Subgraphen nicht mehr, wenn er in den letzten 30 Tagen nicht abgefragt wurde. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Für andere Indexer besteht ein Anreiz, Subgraphen mit laufendem Abfragevolumen zu unterstützen. Das Anfragevolumen an den Upgrade-Indexer sollte gegen Null tendieren, da er eine kleine Zuweisungsgröße hat, und andere Indexer sollten für Anfragen vor ihm ausgewählt werden. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. From 6fe578511c1d27031cc0e7164ca46497d0a0cc23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:12 -0500 Subject: [PATCH 0344/1789] New translations sunrise.mdx (Italian) --- website/src/pages/it/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/it/archived/sunrise.mdx b/website/src/pages/it/archived/sunrise.mdx index eb18a93c506c..71262f22e7d8 100644 --- a/website/src/pages/it/archived/sunrise.mdx +++ b/website/src/pages/it/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## What was the Sunrise of Decentralized Data? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### What happened to the hosted service? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. 
+The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Was Subgraph Studio impacted by this upgrade? No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## About the Upgrade Indexer > The upgrade Indexer is currently active. -The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### What does the upgrade Indexer do? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Why is Edge & Node running the upgrade Indexer? -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### What does the upgrade indexer mean for existing Indexers? 
Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### What does this mean for Delegators? -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ The upgrade Indexer enables chains on the network that were previously only supp The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. 
+The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. From 5ca83dc56ce7e96e4b136793b4ebb8ff9310f162 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:13 -0500 Subject: [PATCH 0345/1789] New translations sunrise.mdx (Japanese) --- website/src/pages/ja/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/ja/archived/sunrise.mdx b/website/src/pages/ja/archived/sunrise.mdx index eac51559a724..e53b28b20016 100644 --- a/website/src/pages/ja/archived/sunrise.mdx +++ b/website/src/pages/ja/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## What was the Sunrise of Decentralized Data? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### What happened to the hosted service? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Was Subgraph Studio impacted by this upgrade? No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. 
Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## About the Upgrade Indexer > The upgrade Indexer is currently active. -The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### What does the upgrade Indexer do? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### なぜEdge & Nodeはアップグレード・インデクサーを実行しているのか? -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### What does the upgrade indexer mean for existing Indexers? Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### これはデリゲーターにとって何を意味するのか? -The upgrade Indexer offers a powerful opportunity for Delegators. 
As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ The upgrade Indexer enables chains on the network that were previously only supp The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. 
From 674a074b25e7720dc03b413bd59e80f68ca607e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:14 -0500 Subject: [PATCH 0346/1789] New translations sunrise.mdx (Korean) --- website/src/pages/ko/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/ko/archived/sunrise.mdx b/website/src/pages/ko/archived/sunrise.mdx index eb18a93c506c..71262f22e7d8 100644 --- a/website/src/pages/ko/archived/sunrise.mdx +++ b/website/src/pages/ko/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## What was the Sunrise of Decentralized Data? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### What happened to the hosted service? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Was Subgraph Studio impacted by this upgrade? No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## About the Upgrade Indexer > The upgrade Indexer is currently active. -The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. 
+The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### What does the upgrade Indexer do? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Why is Edge & Node running the upgrade Indexer? -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### What does the upgrade indexer mean for existing Indexers? Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### What does this mean for Delegators? -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. 
+It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ The upgrade Indexer enables chains on the network that were previously only supp The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. From ebf6b954f18362c64aa14e0c23da073bd48f7651 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:15 -0500 Subject: [PATCH 0347/1789] New translations sunrise.mdx (Dutch) --- website/src/pages/nl/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/nl/archived/sunrise.mdx b/website/src/pages/nl/archived/sunrise.mdx index eb18a93c506c..71262f22e7d8 100644 --- a/website/src/pages/nl/archived/sunrise.mdx +++ b/website/src/pages/nl/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## What was the Sunrise of Decentralized Data? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. 
-This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### What happened to the hosted service? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Was Subgraph Studio impacted by this upgrade? No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## About the Upgrade Indexer > The upgrade Indexer is currently active. -The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### What does the upgrade Indexer do? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. 
+- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Why is Edge & Node running the upgrade Indexer? -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### What does the upgrade indexer mean for existing Indexers? Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### What does this mean for Delegators? -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. 
Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ The upgrade Indexer enables chains on the network that were previously only supp The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. From 5929764b13acc4f866d7792fb689181d36c8a018 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:16 -0500 Subject: [PATCH 0348/1789] New translations sunrise.mdx (Polish) --- website/src/pages/pl/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/pl/archived/sunrise.mdx b/website/src/pages/pl/archived/sunrise.mdx index eb18a93c506c..71262f22e7d8 100644 --- a/website/src/pages/pl/archived/sunrise.mdx +++ b/website/src/pages/pl/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## What was the Sunrise of Decentralized Data? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### What happened to the hosted service? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Was Subgraph Studio impacted by this upgrade? No, Subgraph Studio was not impacted by Sunrise. 
Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## About the Upgrade Indexer > The upgrade Indexer is currently active. -The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### What does the upgrade Indexer do? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Why is Edge & Node running the upgrade Indexer? -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### What does the upgrade indexer mean for existing Indexers? Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. 
As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### What does this mean for Delegators? -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ The upgrade Indexer enables chains on the network that were previously only supp The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. 
+Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. From 4df8380d45656b76fc8233ac24f76026ac2b51f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:17 -0500 Subject: [PATCH 0349/1789] New translations sunrise.mdx (Portuguese) --- website/src/pages/pt/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/pt/archived/sunrise.mdx b/website/src/pages/pt/archived/sunrise.mdx index f7e7a0faf5f5..280639c4a9e5 100644 --- a/website/src/pages/pt/archived/sunrise.mdx +++ b/website/src/pages/pt/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## O Que Foi o Nascer do Sol dos Dados Descentralizados? -O Nascer do Sol dos Dados Descentralizados foi uma iniciativa liderada pela Edge & Node, com a meta de garantir que os programadores de subgraphs fizessem uma atualização suave para a rede descentralizada do The Graph. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -Este plano teve base em desenvolvimentos anteriores do ecossistema do The Graph, e incluiu um Indexador de atualização para servir queries em subgraphs recém-editados. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### O que aconteceu com o serviço hospedado? -Os endpoints de query do serviço hospedado não estão mais disponíveis, e programadores não podem mais editar subgraphs novos no serviço hospedado. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -Durante o processo de atualização, donos de subgraphs no serviço hospedado puderam atualizar os seus subgraphs até a Graph Network. Além disto, programadores podiam resgatar subgraphs atualizados automaticamente. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### O Subgraph Studio foi atingido por esta atualização? Não, o Subgraph Studio não foi impactado pelo Nascer do Sol. Os subgraphs estavam disponíveis imediatamente para queries, movidos pelo Indexador de atualização, que usa a mesma infraestrutura do serviço hospedado. -### Por que subgraphs eram publicados ao Arbitrum, eles começaram a indexar uma rede diferente? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. 
Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## Sobre o Indexador de Atualização > O Indexador de Atualização está atualmente ativo. -O Indexador de atualização foi construído para melhorar a experiência de atualizar subgraphs do serviço hospedado à Graph Network e apoiar novas versões de subgraphs existentes que ainda não foram indexados. +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### O que o Indexador de atualização faz? -- Ele inicializa chains que ainda não tenham recompensas de indexação na Graph Network, e garante que um Indexador esteja disponível para servir queries o mais rápido possível após a publicação de um subgraph. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexadores que operam um Indexador de atualização o fazem como um serviço público, para apoiar novos subgraphs e chains adicionais que não tenham recompensas de indexação antes da aprovação do Graph Council. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Porque a Edge & Node executa o Indexador de atualização? -A Edge & Node operou historicamente o serviço hospedado, e como resultado, já sincronizou os dados de subgraphs do serviço hospedado. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### O que o Indexador de atualização significa para Indexadores existentes? Chains que antes só eram apoiadas no serviço hospedado foram disponibilizadas para programadores na Graph Network, inicialmente, sem recompensas de indexação. -Porém, esta ação liberou taxas de query para qualquer Indexador interessado e aumentou o número de subgraphs publicados na Graph Network. Como resultado, Indexadores têm mais oportunidades para indexar e servir estes subgraphs em troca de taxas de query, antes mesmo da ativação de recompensas de indexação para uma chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -O Indexador de atualização também fornece à comunidade de Indexadores informações sobre a demanda em potencial para subgraphs e novas chains na Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### O que isto significa para Delegantes? -O Indexador de atualização oferece uma forte oportunidade para Delegantes. Como ele permitiu que mais subgraphs fossem atualizados do serviço hospedado até a Graph Network, os Delegantes podem se beneficiar do aumento na atividade da rede. +The upgrade Indexer offers a powerful opportunity for Delegators. 
As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### O Indexador de atualização concorreu com Indexadores existentes para recompensas? -Não, o Indexador de atualização só aloca a quantidade mínima por subgraph e não coleta recompensas de indexação. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -Ele opera numa base de "necessidade" e serve como uma reserva até que uma cota de qualidade de serviço seja alcançada por, no mínimo, três outros Indexadores na rede para chains e subgraphs respetivos. +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### Como isto afeta os programadores de subgraph? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### Como o Indexador de atualizações beneficia consumidores de dados? @@ -71,10 +71,10 @@ O Indexador de atualização ativa, na rede, chains que antes só tinham apoio n O Indexador de atualização precifica queries no preço do mercado, para não influenciar o mercado de taxas de queries. -### Quando o Indexador de atualização parará de apoiar um subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -O Indexador de atualização apoia um subgraph até que, no mínimo, 3 outros indexadores sirvam queries feitas nele com êxito e consistência. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Além disto, o Indexador de atualização para de apoiar um subgraph se ele não tiver sido consultado nos últimos 30 dias. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Outros Indexadores são incentivados a apoiar subgraphs com o volume de query atual. O volume de query ao Indexador de atualização deve se aproximar de zero, já que ele tem um tamanho de alocação pequeno e outros Indexadores devem ser escolhidos por queries antes disso. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. 
From a04d944abcc21c6f1d48d3fc4171d1d6d8bdf059 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:18 -0500 Subject: [PATCH 0350/1789] New translations sunrise.mdx (Russian) --- website/src/pages/ru/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/ru/archived/sunrise.mdx b/website/src/pages/ru/archived/sunrise.mdx index eb18a93c506c..71262f22e7d8 100644 --- a/website/src/pages/ru/archived/sunrise.mdx +++ b/website/src/pages/ru/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## What was the Sunrise of Decentralized Data? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### What happened to the hosted service? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Was Subgraph Studio impacted by this upgrade? No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## About the Upgrade Indexer > The upgrade Indexer is currently active. -The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. 
+The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### What does the upgrade Indexer do? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Why is Edge & Node running the upgrade Indexer? -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### What does the upgrade indexer mean for existing Indexers? Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### What does this mean for Delegators? -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. 
+It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ The upgrade Indexer enables chains on the network that were previously only supp The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. From 82e1b34a3a04138532bf55792e935bd573dd5504 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:19 -0500 Subject: [PATCH 0351/1789] New translations sunrise.mdx (Swedish) --- website/src/pages/sv/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/sv/archived/sunrise.mdx b/website/src/pages/sv/archived/sunrise.mdx index eb18a93c506c..71262f22e7d8 100644 --- a/website/src/pages/sv/archived/sunrise.mdx +++ b/website/src/pages/sv/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## What was the Sunrise of Decentralized Data? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. 
-This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### What happened to the hosted service? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Was Subgraph Studio impacted by this upgrade? No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## About the Upgrade Indexer > The upgrade Indexer is currently active. -The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### What does the upgrade Indexer do? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. 
+- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Why is Edge & Node running the upgrade Indexer? -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### What does the upgrade indexer mean for existing Indexers? Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### What does this mean for Delegators? -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. 
Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ The upgrade Indexer enables chains on the network that were previously only supp The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. From 82dbbd471976bbf04053b63901c94f3a4f42aee3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:20 -0500 Subject: [PATCH 0352/1789] New translations sunrise.mdx (Turkish) --- website/src/pages/tr/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/tr/archived/sunrise.mdx b/website/src/pages/tr/archived/sunrise.mdx index f7d204bb791f..91accac3661b 100644 --- a/website/src/pages/tr/archived/sunrise.mdx +++ b/website/src/pages/tr/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## What was the Sunrise of Decentralized Data? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### What happened to the hosted service? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Was Subgraph Studio impacted by this upgrade? No, Subgraph Studio was not impacted by Sunrise. 
Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## About the Upgrade Indexer > The upgrade Indexer is currently active. -The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### What does the upgrade Indexer do? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Yükseltme İndeksleyicisini neden Edge & Node çalıştırıyor? -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### What does the upgrade indexer mean for existing Indexers? Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. 
As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### Bu Delegatörler için ne anlama gelmektedir? -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ The upgrade Indexer enables chains on the network that were previously only supp The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. 
The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. From 35b6ba81ba80e31661d2e0ce494c499472f5fc84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:21 -0500 Subject: [PATCH 0353/1789] New translations sunrise.mdx (Ukrainian) --- website/src/pages/uk/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/uk/archived/sunrise.mdx b/website/src/pages/uk/archived/sunrise.mdx index eb18a93c506c..71262f22e7d8 100644 --- a/website/src/pages/uk/archived/sunrise.mdx +++ b/website/src/pages/uk/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## What was the Sunrise of Decentralized Data? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### What happened to the hosted service? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Was Subgraph Studio impacted by this upgrade? No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. 
Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## About the Upgrade Indexer > The upgrade Indexer is currently active. -The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### What does the upgrade Indexer do? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Why is Edge & Node running the upgrade Indexer? -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### What does the upgrade indexer mean for existing Indexers? Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### What does this mean for Delegators? -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? 
-No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ The upgrade Indexer enables chains on the network that were previously only supp The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. 
From 8d8b43cdde6a2fba1cf12d1211c10234cc2de7ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:22 -0500 Subject: [PATCH 0354/1789] New translations sunrise.mdx (Chinese Simplified) --- website/src/pages/zh/archived/sunrise.mdx | 52 +++++++++++------------ 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/website/src/pages/zh/archived/sunrise.mdx b/website/src/pages/zh/archived/sunrise.mdx index a768ee33d016..484c8ec0714e 100644 --- a/website/src/pages/zh/archived/sunrise.mdx +++ b/website/src/pages/zh/archived/sunrise.mdx @@ -1,5 +1,5 @@ --- -title: 黎明后+升级到Graph网络常见问题 +title: Post-Sunrise+升级到The Graph网络常见问题 sidebarTitle: Post-Sunrise Upgrade FAQ --- @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## 去中心化数据的黎明是什么? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### What happened to the hosted service? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Was Subgraph Studio impacted by this upgrade? -No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. +不,Subgraph Studio没有受到Sunrise的影响。子图立即可用于查询,由升级索引人提供支持,该索引人使用与托管服务相同的基础架构。 -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) -## About the Upgrade Indexer +## 关于升级索引人 > The upgrade Indexer is currently active. 
-The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. -### What does the upgrade Indexer do? +### 升级的索引人意味着什么? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. -### 为什么 Edge & Node 运行升级索引器? +### 为什么 Edge & Node 运行升级索引人? -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### What does the upgrade indexer mean for existing Indexers? Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### What does this mean for Delegators? -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. 
+No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ The upgrade Indexer enables chains on the network that were previously only supp The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. From fecf03cc6ed3f73a7a6f58b91afeb9f7a8a492a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:22 -0500 Subject: [PATCH 0355/1789] New translations sunrise.mdx (Urdu (Pakistan)) --- website/src/pages/ur/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/ur/archived/sunrise.mdx b/website/src/pages/ur/archived/sunrise.mdx index b1ad2e6523a3..dc77506b82b6 100644 --- a/website/src/pages/ur/archived/sunrise.mdx +++ b/website/src/pages/ur/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## What was the Sunrise of Decentralized Data? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. 
+The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### What happened to the hosted service? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Was Subgraph Studio impacted by this upgrade? No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## About the Upgrade Indexer > The upgrade Indexer is currently active. -The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### What does the upgrade Indexer do? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). 
-- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### ایج اور نوڈ اپ گریڈ انڈیکسر کیوں چلا رہا ہے؟ -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### What does the upgrade indexer mean for existing Indexers? Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### ڈیلیگیٹرز کے لیے اس کا کیا مطلب ہے؟ -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. 
+Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ The upgrade Indexer enables chains on the network that were previously only supp The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. From 4d745d83060c56efd30aea7a34bce5519cf00b77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:23 -0500 Subject: [PATCH 0356/1789] New translations sunrise.mdx (Vietnamese) --- website/src/pages/vi/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/vi/archived/sunrise.mdx b/website/src/pages/vi/archived/sunrise.mdx index eb18a93c506c..71262f22e7d8 100644 --- a/website/src/pages/vi/archived/sunrise.mdx +++ b/website/src/pages/vi/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## What was the Sunrise of Decentralized Data? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### What happened to the hosted service? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. 
+During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Was Subgraph Studio impacted by this upgrade? No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## About the Upgrade Indexer > The upgrade Indexer is currently active. -The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### What does the upgrade Indexer do? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Why is Edge & Node running the upgrade Indexer? -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### What does the upgrade indexer mean for existing Indexers? Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. 
As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### What does this mean for Delegators? -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ The upgrade Indexer enables chains on the network that were previously only supp The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. 
+Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. From 3a03ccae02fe837cedbf57ebe0988874335cef60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:24 -0500 Subject: [PATCH 0357/1789] New translations sunrise.mdx (Marathi) --- website/src/pages/mr/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/mr/archived/sunrise.mdx b/website/src/pages/mr/archived/sunrise.mdx index eb18a93c506c..71262f22e7d8 100644 --- a/website/src/pages/mr/archived/sunrise.mdx +++ b/website/src/pages/mr/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## What was the Sunrise of Decentralized Data? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### What happened to the hosted service? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### Was Subgraph Studio impacted by this upgrade? No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. 
As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## About the Upgrade Indexer > The upgrade Indexer is currently active. -The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### What does the upgrade Indexer do? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Why is Edge & Node running the upgrade Indexer? -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### What does the upgrade indexer mean for existing Indexers? Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### What does this mean for Delegators? -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +The upgrade Indexer offers a powerful opportunity for Delegators. 
As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### Did the upgrade Indexer compete with existing Indexers for rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### How does this affect subgraph developers? +### How does this affect Subgraph developers? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### How does the upgrade Indexer benefit data consumers? @@ -71,10 +71,10 @@ The upgrade Indexer enables chains on the network that were previously only supp The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. -### When will the upgrade Indexer stop supporting a subgraph? +### When will the upgrade Indexer stop supporting a Subgraph? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. 
From 91949e1bd94cba9e7a9ba2d6281f7dafd139a587 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:25 -0500 Subject: [PATCH 0358/1789] New translations sunrise.mdx (Hindi) --- website/src/pages/hi/archived/sunrise.mdx | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/hi/archived/sunrise.mdx b/website/src/pages/hi/archived/sunrise.mdx index 64396d2fb998..f90c0a674a5d 100644 --- a/website/src/pages/hi/archived/sunrise.mdx +++ b/website/src/pages/hi/archived/sunrise.mdx @@ -7,61 +7,61 @@ sidebarTitle: Post-Sunrise Upgrade FAQ ## विकेंद्रीकृत डेटा का सूर्योदय क्या था? -"Decentralized Data का उदय" Edge & Node द्वारा आरंभ की गई एक पहल थी। इस पहल ने subgraph डेवलपर्स को The Graph के विकेंद्रीकृत नेटवर्क में सहजता से अपग्रेड करने में सक्षम बनाया। +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. -इस योजना ने The Graph इकोसिस्टम के पिछले विकासों पर आधारित किया, जिसमें नए प्रकाशित सबग्राफ पर क्वेरी सर्व करने के लिए एक अपग्रेडेड इंडेक्सर शामिल था। +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. ### Hosted service का क्या होगा? -होस्टेड सेवा के क्वेरी एंडपॉइंट अब उपलब्ध नहीं हैं, और डेवलपर्स होस्टेड सेवा पर नए सबग्राफ्स को तैनात नहीं कर सकते हैं। +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. -अपग्रेड प्रक्रिया के दौरान, होस्टेड सर्विस सबग्राफ के मालिक अपने सबग्राफ को The Graph Network पर अपग्रेड कर सकते थे। इसके अतिरिक्त, डेवलपर्स ऑटो-अपग्रेड किए गए सबग्राफ को क्लेम करने में सक्षम थे। +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. ### क्या इस अपग्रेड से Subgraph Studio प्रभावित हुआ था? नहीं, सबग्राफ स्टूडियो पर Sunrise का कोई प्रभाव नहीं पड़ा। सबग्राफ तुरंत क्वेरी के लिए उपलब्ध थे, जो अपग्रेड किए गए Indexer द्वारा संचालित हैं, जो उसी इंफ्रास्ट्रक्चर का उपयोग करता है जैसा Hosted Service में होता है। -### सबग्राफ्स को Arbitrum पर क्यों प्रकाशित किया गया, क्या इसने एक अलग नेटवर्क को इंडेक्स करना शुरू किया? +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? -The Graph Network को पहले Ethereum mainnet पर डिप्लॉय किया गया था, लेकिन गैस लागत को कम करने के लिए इसे बाद में Arbitrum One पर स्थानांतरित कर दिया गया। परिणामस्वरूप, सभी नए सबग्राफ को Arbitrum पर The Graph Network में प्रकाशित किया जाता है ताकि Indexers उन्हें सपोर्ट कर सकें। Arbitrum वह नेटवर्क है जिस पर सबग्राफ को प्रकाशित किया जाता है, लेकिन सबग्राफ [supported networks](/supported-networks/) में से किसी पर भी index कर सकते हैं +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. 
Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) ## About the Upgrade Indexer > अपग्रेड Indexer वर्तमान में सक्रिय है। -अपग्रेड Indexer को Hosted Service से The Graph Network में सबग्राफ़्स के अपग्रेड करने के अनुभव को सुधारने और उन मौजूदा सबग्राफ़्स के नए संस्करणों का समर्थन करने के लिए लागू किया गया था जो अभी तक इंडेक्स नहीं किए गए थे। +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. ### अपग्रेड Indexer क्या करता है? -- यह उन चेन को बूटस्ट्रैप करता है जिन्हें अभी तक The Graph Network पर इंडेक्सिंग पुरस्कार नहीं मिले हैं और यह सुनिश्चित करता है कि एक Indexer उपलब्ध हो ताकि एक Subgraph प्रकाशित होने के तुरंत बाद क्वेरी को यथाशीघ्र सेवा दी जा सके। +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. - यह उन chain को भी सपोर्ट करता है जो पहले केवल Hosted Service पर उपलब्ध थीं। सपोर्टेड chain की व्यापक सूची [यहां](/supported-networks/) देखें। -- जो Indexer अपग्रेड इंडेक्सर का संचालन करते हैं, वे नए सबग्राफ़ और अतिरिक्त चेन का समर्थन करने के लिए एक सार्वजनिक सेवा के रूप में ऐसा करते हैं जो इंडेक्सिंग पुरस्कारों की कमी का सामना कर रहे हैं, जब तक कि The Graph काउंसिल उन्हें मंजूरी नहीं देती। +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. ### Why is Edge & Node running the upgrade Indexer? -Edge & Node ने ऐतिहासिक रूप से होस्टेड सेवा का प्रबंधन किया है और, परिणामस्वरूप, उनके पास होस्टेड सेवा के सबग्राफ के लिए पहले से ही समन्वयित डेटा है। +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. ### What does the upgrade indexer mean for existing Indexers? पहले केवल होस्टेड सेवा पर समर्थित चेन अब बिना indexing पुरस्कार के डेवलपर्स के लिएT he Graph Network पर उपलब्ध कराई गईं। -हालांकि, इस कार्रवाई ने किसी भी इच्छुक Indexer के लिए क्वेरी शुल्क को अनलॉक कर दिया और The Graph Network पर प्रकाशित सबग्राफ की संख्या बढ़ा दी। परिणामस्वरूप, Indexers के पास इन सबग्राफ को इंडेक्स करने और सेवा देने के लिए अधिक अवसर हैं, जो कि क्वेरी शुल्क के बदले में हैं, यहां तक कि जब तक किसी चेन के लिए इंडेक्सिंग इनाम सक्षम नहीं होते। +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. -अपग्रेड इंडेक्सर Indexer समुदाय को The Graph Network पर सबग्राफ और नए चेन की संभावित मांग के बारे में जानकारी भी प्रदान करता है। +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. ### What does this mean for Delegators? -अपग्रेड Indexer डेलीगेटर्स के लिए एक शक्तिशाली अवसर प्रदान करता है। क्योंकि इससे अधिक सबग्राफ को होस्टेड सेवा से The Graph Network में अपग्रेड करने की अनुमति मिली, डेलीगेटर्स को बढ़ी हुई नेटवर्क गतिविधि का लाभ मिलता है। +The upgrade Indexer offers a powerful opportunity for Delegators. 
As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. ### क्या अपग्रेड किया गया Indexer मौजूदा Indexer के साथ पुरस्कारों के लिए प्रतिस्पर्धा करता था? -नहीं, अपग्रेड किया गया Indexer केवल प्रति Subgraph न्यूनतम राशि आवंटित करता है और indexing पुरस्कार एकत्र नहीं करता है। +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. -यह "आवश्यकता अनुसार" आधार पर काम करता है, एक बैकअप के रूप में कार्य करता है जब तक कि नेटवर्क में संबंधित चेन और सबग्राफ के लिए कम से कम तीन अन्य Indexer द्वारा पर्याप्त सेवा गुणवत्ता प्राप्त नहीं की जाती। +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. -### यह Subgraph डेवलपर्स को कैसे प्रभावित करता है? +### How does this affect Subgraph developers? -सबग्राफ डेवलपर्स अपने सबग्राफ को The Graph Network पर लगभग तुरंत क्वेरी कर सकते हैं, जब वे होस्टेड सेवा से या Subgraph Studio()/subgraphs/developing/publishing/publishing-a-subgraph/ से प्रकाशित करते हैं, क्योंकि इंडेक्सिंग के लिए कोई लीड टाइम आवश्यक नहीं है। कृपया ध्यान दें कि सबग्राफ बनाना(/developing/creating-a-subgraph/) इस अपग्रेड से प्रभावित नहीं हुआ था। +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### अपग्रेड Indexer डेटा उपभोक्ताओं को कैसे लाभ पहुंचाता है? @@ -71,10 +71,10 @@ The upgrade Indexer enables chains on the network that were previously only supp अपग्रेड में Indexer बाज़ार दर पर क्वेरीज़ की कीमत तय करता है ताकि क्वेरी शुल्क बाज़ार पर कोई प्रभाव न पड़े। -### अपग्रेड Indexer कब एक Subgraph का समर्थन करना बंद करेगा? +### When will the upgrade Indexer stop supporting a Subgraph? -अपग्रेड Indexer एक Subgraph का समर्थन करता है जब तक कि कम से कम 3 अन्य Indexers सफलतापूर्वक और लगातार किए गए प्रश्नों का उत्तर नहीं देते। +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. -इसके अतिरिक्त, अपग्रेड Indexer एक Subgraph का समर्थन करना बंद कर देता है यदि उसे पिछले 30 दिनों में क्वेरी नहीं किया गया है। +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. -अन्य Indexer को उन सबग्राफ का समर्थन करने के लिए प्रोत्साहित किया जाता है जिनमें निरंतर क्वेरी वॉल्यूम होता है। अपग्रेड Indexer के लिए क्वेरी वॉल्यूम शून्य की ओर बढ़ना चाहिए, क्योंकि इसका आवंटन आकार छोटा होता है, और क्वेरी के लिए अन्य Indexer को प्राथमिकता दी जानी चाहिए। +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. 
From c16644ada5b65aa1fc412b6163b4c2563bf40b0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:27 -0500 Subject: [PATCH 0359/1789] New translations sunrise.mdx (Swahili) --- website/src/pages/sw/archived/sunrise.mdx | 80 +++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 website/src/pages/sw/archived/sunrise.mdx diff --git a/website/src/pages/sw/archived/sunrise.mdx b/website/src/pages/sw/archived/sunrise.mdx new file mode 100644 index 000000000000..71262f22e7d8 --- /dev/null +++ b/website/src/pages/sw/archived/sunrise.mdx @@ -0,0 +1,80 @@ +--- +title: Post-Sunrise + Upgrading to The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ +--- + +> Note: The Sunrise of Decentralized Data ended June 12th, 2024. + +## What was the Sunrise of Decentralized Data? + +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled Subgraph developers to upgrade to The Graph’s decentralized network seamlessly. + +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published Subgraphs. + +### What happened to the hosted service? + +The hosted service query endpoints are no longer available, and developers cannot deploy new Subgraphs on the hosted service. + +During the upgrade process, owners of hosted service Subgraphs could upgrade their Subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded Subgraphs. + +### Was Subgraph Studio impacted by this upgrade? + +No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. + +### Why were Subgraphs published to Arbitrum, did it start indexing a different network? + +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new Subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that Subgraphs are published to, but Subgraphs can index any of the [supported networks](/supported-networks/) + +## About the Upgrade Indexer + +> The upgrade Indexer is currently active. + +The upgrade Indexer was implemented to improve the experience of upgrading Subgraphs from the hosted service to The Graph Network and support new versions of existing Subgraphs that had not yet been indexed. + +### What does the upgrade Indexer do? + +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a Subgraph is published. +- It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). +- Indexers that operate an upgrade Indexer do so as a public service to support new Subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. + +### Why is Edge & Node running the upgrade Indexer? + +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service Subgraphs. + +### What does the upgrade indexer mean for existing Indexers? + +Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. 
+ +However, this action unlocked query fees for any interested Indexer and increased the number of Subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these Subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about the potential demand for Subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more Subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. + +### Did the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer only allocates the minimum amount per Subgraph and does not collect indexing rewards. + +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and Subgraphs. + +### How does this affect Subgraph developers? + +Subgraph developers can query their Subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a Subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. + +### How does the upgrade Indexer benefit data consumers? + +The upgrade Indexer enables chains on the network that were previously only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How does the upgrade Indexer price queries? + +The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. + +### When will the upgrade Indexer stop supporting a Subgraph? + +The upgrade Indexer supports a Subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. + +Furthermore, the upgrade Indexer stops supporting a Subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support Subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. From 3213c190f782defd12a06e12d45314d9d12f8627 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:38 -0500 Subject: [PATCH 0360/1789] New translations contracts.mdx (Swahili) --- website/src/pages/sw/contracts.mdx | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 website/src/pages/sw/contracts.mdx diff --git a/website/src/pages/sw/contracts.mdx b/website/src/pages/sw/contracts.mdx new file mode 100644 index 000000000000..3938844149c1 --- /dev/null +++ b/website/src/pages/sw/contracts.mdx @@ -0,0 +1,29 @@ +--- +title: Protocol Contracts +--- + +import { ProtocolContractsTable } from '@/contracts' + +Below are the deployed contracts which power The Graph Network. Visit the official [contracts repository](https://github.com/graphprotocol/contracts) to learn more. + +## Arbitrum + +This is the principal deployment of The Graph Network. + + + +## Mainnet + +This was the original deployment of The Graph Network. 
[Learn more](/archived/arbitrum/arbitrum-faq/) about The Graph's scaling with Arbitrum. + + + +## Arbitrum Sepolia + +This is the primary testnet for The Graph Network. Testnet is predominantly used by core developers and ecosystem participants for testing purposes. There are no guarantees of service or availability on The Graph's testnets. + + + +## Sepolia + + From 60c5e9d5afb2bf2d3937f4f51c879a9163cc9ee9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:39 -0500 Subject: [PATCH 0361/1789] New translations chain-integration-overview.mdx (Romanian) --- website/src/pages/ro/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ro/indexing/chain-integration-overview.mdx b/website/src/pages/ro/indexing/chain-integration-overview.mdx index 77141e82b34a..33619b03c483 100644 --- a/website/src/pages/ro/indexing/chain-integration-overview.mdx +++ b/website/src/pages/ro/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ This process is related to the Subgraph Data Service, applicable only to new Sub ### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? -This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. How much time will the process of reaching full protocol support take? From be4941c047a401c17fb533876786365150480e15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:40 -0500 Subject: [PATCH 0362/1789] New translations chain-integration-overview.mdx (French) --- website/src/pages/fr/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/fr/indexing/chain-integration-overview.mdx b/website/src/pages/fr/indexing/chain-integration-overview.mdx index 4bbb83bdc4a9..48787263c1af 100644 --- a/website/src/pages/fr/indexing/chain-integration-overview.mdx +++ b/website/src/pages/fr/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ Ce processus est lié au service de données Subgraph, applicable uniquement aux ### 2. Que se passe-t-il si la prise en charge de Firehose et Substreams intervient après que le réseau est pris en charge sur le mainnet ? 
-Cela n’aurait un impact que sur la prise en charge du protocole pour l’indexation des récompenses sur les subgraphs alimentés par Substreams. La nouvelle implémentation de Firehose nécessiterait des tests sur testnet, en suivant la méthodologie décrite pour l'étape 2 de ce GIP. De même, en supposant que l'implémentation soit performante et fiable, un PR sur la [Matrice de support des fonctionnalités](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) serait requis ( Fonctionnalité de sous-graphe « Sous-flux de sources de données »), ainsi qu'un nouveau GIP pour la prise en charge du protocole pour l'indexation des récompenses. N'importe qui peut créer le PR et le GIP ; la Fondation aiderait à obtenir l'approbation du Conseil. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. Combien de temps faudra-t-il pour parvenir à la prise en charge complète du protocole ? From 7b20a725a0b66c762a2b1c9703736a2842034d6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:41 -0500 Subject: [PATCH 0363/1789] New translations chain-integration-overview.mdx (Spanish) --- website/src/pages/es/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/es/indexing/chain-integration-overview.mdx b/website/src/pages/es/indexing/chain-integration-overview.mdx index 77141e82b34a..33619b03c483 100644 --- a/website/src/pages/es/indexing/chain-integration-overview.mdx +++ b/website/src/pages/es/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ This process is related to the Subgraph Data Service, applicable only to new Sub ### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? -This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. 
Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. How much time will the process of reaching full protocol support take? From ede0e0debd3ebf77e42bcc694487511f51c4b566 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:42 -0500 Subject: [PATCH 0364/1789] New translations chain-integration-overview.mdx (Arabic) --- website/src/pages/ar/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ar/indexing/chain-integration-overview.mdx b/website/src/pages/ar/indexing/chain-integration-overview.mdx index e6b95ec0fc17..af9a582b58d3 100644 --- a/website/src/pages/ar/indexing/chain-integration-overview.mdx +++ b/website/src/pages/ar/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ Ready to shape the future of The Graph Network? [Start your proposal](https://gi ### 2. ماذا يحدث إذا تم دعم فايرهوز و سبستريمز بعد أن تم دعم الشبكة على الشبكة الرئيسية؟ -هذا سيؤثر فقط على دعم البروتوكول لمكافآت الفهرسة على الغرافات الفرعية المدعومة من سبستريمز. تنفيذ الفايرهوز الجديد سيحتاج إلى الفحص على شبكة الاختبار، وفقًا للمنهجية الموضحة للمرحلة الثانية في هذا المقترح لتحسين الغراف. وعلى نحو مماثل، وعلى افتراض أن التنفيذ فعال وموثوق به، سيتتطالب إنشاء طلب سحب على [مصفوفة دعم الميزات] (https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) ("مصادر بيانات سبستريمز" ميزة للغراف الفرعي)، بالإضافة إلى مقترح جديد لتحسين الغراف، لدعم البروتوكول لمكافآت الفهرسة. يمكن لأي شخص إنشاء طلب السحب ومقترح تحسين الغراف؛ وسوف تساعد المؤسسة في الحصول على موافقة المجلس. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. How much time will the process of reaching full protocol support take? From 87394c191cba8884848993b64e511d23cd212585 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:43 -0500 Subject: [PATCH 0365/1789] New translations chain-integration-overview.mdx (Czech) --- website/src/pages/cs/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/cs/indexing/chain-integration-overview.mdx b/website/src/pages/cs/indexing/chain-integration-overview.mdx index e048421d7ad9..a2f1eed58864 100644 --- a/website/src/pages/cs/indexing/chain-integration-overview.mdx +++ b/website/src/pages/cs/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ Tento proces souvisí se službou Datová služba podgrafů a vztahuje se pouze ### 2. Co se stane, když podpora Firehose & Substreams přijde až poté, co bude síť podporována v mainnet? -To by mělo vliv pouze na podporu protokolu pro indexování odměn na podgrafech s podsílou. Novou implementaci Firehose by bylo třeba testovat v testnetu podle metodiky popsané pro fázi 2 v tomto GIP. 
Podobně, za předpokladu, že implementace bude výkonná a spolehlivá, by bylo nutné provést PR na [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (`Substreams data sources` Subgraph Feature) a také nový GIP pro podporu protokolu pro indexování odměn. PR a GIP může vytvořit kdokoli; nadace by pomohla se schválením Radou. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. How much time will the process of reaching full protocol support take? From 0bfc75eb31c38b242395928ccdbfca41759dc8c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:44 -0500 Subject: [PATCH 0366/1789] New translations chain-integration-overview.mdx (German) --- website/src/pages/de/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/de/indexing/chain-integration-overview.mdx b/website/src/pages/de/indexing/chain-integration-overview.mdx index 5f867a52ca5b..1cb1c24cb8ed 100644 --- a/website/src/pages/de/indexing/chain-integration-overview.mdx +++ b/website/src/pages/de/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ Dieser Prozess bezieht sich auf den Subgraph Data Service, der nur für neue Sub ### 2. Was geschieht, wenn die Unterstützung für Firehose & Substreams erst nach der Unterstützung des Netzes im Mainnet erfolgt? -Dies würde sich nur auf die Protokollunterstützung für die Indizierung von Rewards auf Subgraphen mit Substreams auswirken. Die neue Firehose-Implementierung müsste im Testnet getestet werden, wobei die für Stufe 2 in diesem GIP beschriebene Methodik anzuwenden wäre. Unter der Annahme, dass die Implementierung performant und zuverlässig ist, wäre ein PR für die [Funktionsunterstützungsmatrix] (https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) erforderlich (`Substreams data sources`-Subgraph-Funktion), sowie eine neue GIP für die Protokollunterstützung für die Indizierung von Rewards. Jeder kann die PR und GIP erstellen; die Foundation würde bei der Genehmigung durch den Rat helfen. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. Wie viel Zeit wird der Prozess bis zur vollständigen Unterstützung des Protokolls in Anspruch nehmen? 
From dff89ed268bbc3a5ae6f763a2f5150e98e64b396 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:45 -0500 Subject: [PATCH 0367/1789] New translations chain-integration-overview.mdx (Italian) --- website/src/pages/it/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/it/indexing/chain-integration-overview.mdx b/website/src/pages/it/indexing/chain-integration-overview.mdx index 77141e82b34a..33619b03c483 100644 --- a/website/src/pages/it/indexing/chain-integration-overview.mdx +++ b/website/src/pages/it/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ This process is related to the Subgraph Data Service, applicable only to new Sub ### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? -This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. How much time will the process of reaching full protocol support take? From 565d8b222513cf0e0cef8aa2a288385e056dfcaa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:46 -0500 Subject: [PATCH 0368/1789] New translations chain-integration-overview.mdx (Japanese) --- website/src/pages/ja/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ja/indexing/chain-integration-overview.mdx b/website/src/pages/ja/indexing/chain-integration-overview.mdx index c9349b7a24e5..4b996d3ddfb4 100644 --- a/website/src/pages/ja/indexing/chain-integration-overview.mdx +++ b/website/src/pages/ja/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ The Graph Network の未来を形作る準備はできていますか? [Start yo ### 2. ネットワークがメインネットでサポートされた後に Firehose とサブストリームのサポートが追加された場合はどうなりますか? -これは、サブストリームで動作するサブグラフに対するインデックスリワードのプロトコルサポートに影響を与えるものです。新しいFirehoseの実装は、このGIPのステージ2に概説されている方法論に従って、テストネットでテストされる必要があります。同様に、実装がパフォーマンスが良く信頼性があると仮定して、[Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md)へのPR(「Substreamsデータソース」サブグラフ機能)が必要です。また、インデックスリワードのプロトコルサポートに関する新しいGIPも必要です。誰でもPRとGIPを作成できますが、Foundationは評議会の承認をサポートします。 +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. 
The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. How much time will the process of reaching full protocol support take? From 6fa3101327251e1ac77ac3f8b00de52fc929f712 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:47 -0500 Subject: [PATCH 0369/1789] New translations chain-integration-overview.mdx (Korean) --- website/src/pages/ko/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ko/indexing/chain-integration-overview.mdx b/website/src/pages/ko/indexing/chain-integration-overview.mdx index 77141e82b34a..33619b03c483 100644 --- a/website/src/pages/ko/indexing/chain-integration-overview.mdx +++ b/website/src/pages/ko/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ This process is related to the Subgraph Data Service, applicable only to new Sub ### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? -This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. How much time will the process of reaching full protocol support take? From 3df3e768bc4ec85d7685df07e3fca42c63809db7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:48 -0500 Subject: [PATCH 0370/1789] New translations chain-integration-overview.mdx (Dutch) --- website/src/pages/nl/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/nl/indexing/chain-integration-overview.mdx b/website/src/pages/nl/indexing/chain-integration-overview.mdx index 77141e82b34a..33619b03c483 100644 --- a/website/src/pages/nl/indexing/chain-integration-overview.mdx +++ b/website/src/pages/nl/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ This process is related to the Subgraph Data Service, applicable only to new Sub ### 2. 
What happens if Firehose & Substreams support comes after the network is supported on mainnet? -This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. How much time will the process of reaching full protocol support take? From 22fd9aed655e914538628c4038b5a1e21f4fd187 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:49 -0500 Subject: [PATCH 0371/1789] New translations chain-integration-overview.mdx (Polish) --- website/src/pages/pl/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pl/indexing/chain-integration-overview.mdx b/website/src/pages/pl/indexing/chain-integration-overview.mdx index 77141e82b34a..33619b03c483 100644 --- a/website/src/pages/pl/indexing/chain-integration-overview.mdx +++ b/website/src/pages/pl/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ This process is related to the Subgraph Data Service, applicable only to new Sub ### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? -This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. 
How much time will the process of reaching full protocol support take? From 2e2c9d530d05ba288696d624405a925a5a7f4f5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:50 -0500 Subject: [PATCH 0372/1789] New translations chain-integration-overview.mdx (Portuguese) --- website/src/pages/pt/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pt/indexing/chain-integration-overview.mdx b/website/src/pages/pt/indexing/chain-integration-overview.mdx index ba7e92f6032b..94fdac9c8e40 100644 --- a/website/src/pages/pt/indexing/chain-integration-overview.mdx +++ b/website/src/pages/pt/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ Este processo é relacionado ao Serviço de Dados de Subgraph, no momento aplic ### 2. O que acontece se o apoio ao Firehose e Substreams chegar após a rede ser apoiada na mainnet? -Isto só impactaria o apoio do protocolo a recompensas de indexação em subgraphs movidos a Substreams. A nova implementação do Firehose precisaria de testes na testnet, seguindo a metodologia sublinhada na Fase 2 deste GIP. De maneira parecida, ao assumir que a implementação seja confiável e de bom desempenho, um PR no [Matrix de Apoio de Funções](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) seria requerido (A função de Subgraph `Substreams data sources`), assim como um novo GIP para apoio do protocolo a recompensas de indexação. Qualquer pessoa pode criar o PR e a GIP; a Foundation ajudaria com o apoio do Conselho. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. Quando tempo demora a conclusão do processo de alcance ao apoio total a protocolos? From 8c0aa4f83fd3227a484fab5c32159ce799820aeb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:51 -0500 Subject: [PATCH 0373/1789] New translations chain-integration-overview.mdx (Russian) --- website/src/pages/ru/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ru/indexing/chain-integration-overview.mdx b/website/src/pages/ru/indexing/chain-integration-overview.mdx index 3ee1ef3bc4bc..613d4b5151c4 100644 --- a/website/src/pages/ru/indexing/chain-integration-overview.mdx +++ b/website/src/pages/ru/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ This process is related to the Subgraph Data Service, applicable only to new Sub ### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? -This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. 
Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. Сколько времени займет процесс достижения полной поддержки протокола? From 6d37bb72f77ad35ce660e5bf47422824ff67b4fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:52 -0500 Subject: [PATCH 0374/1789] New translations chain-integration-overview.mdx (Swedish) --- website/src/pages/sv/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/sv/indexing/chain-integration-overview.mdx b/website/src/pages/sv/indexing/chain-integration-overview.mdx index 147468f7dc17..94f8e8dd42e5 100644 --- a/website/src/pages/sv/indexing/chain-integration-overview.mdx +++ b/website/src/pages/sv/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ Denna process är relaterad till Subgraf Data Service och gäller endast nya Sub ### 2. Vad händer om stöd för Firehose & Substreams kommer efter det att nätverket stöds på mainnet? -Detta skulle endast påverka protokollstödet för indexbelöningar på Substreams-drivna subgrafer. Den nya Firehose-implementeringen skulle behöva testas på testnätet, enligt den metodik som beskrivs för Fas 2 i detta GIP. På liknande sätt, förutsatt att implementationen är prestanda- och tillförlitlig, skulle en PR på [Funktionsstödsmatrisen](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) krävas (`Substreams data sources` Subgraf Feature), liksom en ny GIP för protokollstöd för indexbelöningar. Vem som helst kan skapa PR och GIP; Stiftelsen skulle hjälpa till med Rådets godkännande. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. How much time will the process of reaching full protocol support take? 
From a0381b0bb0d4186c882e12070c74cd801fca1ed1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:53 -0500 Subject: [PATCH 0375/1789] New translations chain-integration-overview.mdx (Turkish) --- website/src/pages/tr/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/tr/indexing/chain-integration-overview.mdx b/website/src/pages/tr/indexing/chain-integration-overview.mdx index db50f7b8e673..adc74805662f 100644 --- a/website/src/pages/tr/indexing/chain-integration-overview.mdx +++ b/website/src/pages/tr/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ Bu süreç Subgraph Veri Hizmeti ile ilgilidir ve yalnızca yeni Subgraph `Veri ### 2. Firehose & Substreams desteği, ağ ana ağda desteklendikten sonra gelirse ne olur? -Bu, yalnızca Substreams destekli subgraphlar'da ödüllerin indekslenmesi için protokol desteğini etkileyecektir. Yeni Firehose uygulamasının, bu GIP'de Aşama 2 için özetlenen metodolojiyi izleyerek testnet üzerinde test edilmesi gerekecektir. Benzer şekilde, uygulamanın performanslı ve güvenilir olduğu varsayıldığı takdirde, [Özellik Destek Matrisi] (https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) üzerinde bir PR (`Substreams veri kaynakları` Subgraph Özelliği) ve ödüllerin indekslenmesi amacıyla protokol desteği için yeni bir GIP gerekecektir. PR ve GIP'yi herkes oluşturabilir; Vakıf, Konsey onayı konusunda yardımcı olacaktır. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. How much time will the process of reaching full protocol support take? From 1db9ba2d5057c79e8ad3468b366c7315bbea40b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:54 -0500 Subject: [PATCH 0376/1789] New translations chain-integration-overview.mdx (Ukrainian) --- website/src/pages/uk/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/uk/indexing/chain-integration-overview.mdx b/website/src/pages/uk/indexing/chain-integration-overview.mdx index 77141e82b34a..33619b03c483 100644 --- a/website/src/pages/uk/indexing/chain-integration-overview.mdx +++ b/website/src/pages/uk/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ This process is related to the Subgraph Data Service, applicable only to new Sub ### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? -This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. 
Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. How much time will the process of reaching full protocol support take? From 4b63902aa9ee87e78fac52fefd330d0cb14de4e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:55 -0500 Subject: [PATCH 0377/1789] New translations chain-integration-overview.mdx (Chinese Simplified) --- website/src/pages/zh/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/zh/indexing/chain-integration-overview.mdx b/website/src/pages/zh/indexing/chain-integration-overview.mdx index 425fdaced82a..d8b8644bd94e 100644 --- a/website/src/pages/zh/indexing/chain-integration-overview.mdx +++ b/website/src/pages/zh/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ title: 链集成过程概述 ### 2. 如果在主网上支持网络之后再支持 Firehose 和 Substreams,会发生什么情况? -这只会影响 Substreams 驱动的子图上的索引奖励的协议支持。新的 Firehose 实现需要在测试网上进行测试,遵循了本 GIP 中第二阶段所概述的方法论。同样地,假设实现是高性能且可靠的,那么需要在 [特征支持矩阵](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) 上提出 PR(`Substreams 数据源` 子图特性),以及一个新的 GIP 来支持索引奖励的协议。任何人都可以创建这个 PR 和 GIP;基金会将协助获得理事会的批准。 +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. How much time will the process of reaching full protocol support take? 
From 0f74ec5a3bddf018b7a1f88a462357b5b3ee90f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:56 -0500 Subject: [PATCH 0378/1789] New translations chain-integration-overview.mdx (Urdu (Pakistan)) --- website/src/pages/ur/indexing/chain-integration-overview.mdx | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ur/indexing/chain-integration-overview.mdx b/website/src/pages/ur/indexing/chain-integration-overview.mdx index e348639e9efa..1be373a08a70 100644 --- a/website/src/pages/ur/indexing/chain-integration-overview.mdx +++ b/website/src/pages/ur/indexing/chain-integration-overview.mdx @@ -7,7 +7,8 @@ title: چین انٹیگریشن کے عمل کا جائزہ ## مرحلہ 1. تکنیکی انٹیگریشن - Please visit [New Chain Integration](/indexing/new-chain-integration/) for information on `graph-node` support for new chains. -- ٹیمیں فورم تھریڈ بنا کر پروٹوکول انٹیگریشن کا عمل شروع کرتی ہیں [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71)(گورننس اور GIPs کے تحت نئے ڈیٹا ذرائع ذیلی زمرہ) ۔ پہلے سے طے شدہ فورم ٹیمپلیٹ کا استعمال لازمی ہے. +- ٹیمیں فورم تھریڈ بنا کر پروٹوکول انٹیگریشن کا عمل شروع کرتی ہیں [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71)(گورننس اور GIPs کے تحت نئے ڈیٹا ذرائع ذیلی زمرہ) + ۔ پہلے سے طے شدہ فورم ٹیمپلیٹ کا استعمال لازمی ہے. ## مرحلہ 2۔ انٹیگریشن کی توثیق @@ -36,7 +37,7 @@ title: چین انٹیگریشن کے عمل کا جائزہ ### 2. اگر مین نیٹ پر نیٹ ورک سپورٹ ہونے کے بعد فائر ہوز اور سب سٹریم سپورٹ آجائے تو کیا ہوگا؟ -یہ صرف سب سٹریمزسے چلنے والے سب گرافس پر انڈیکسنگ کے انعامات کے لیے پروٹوکول سپورٹ کو متاثر کرے گا۔ اس GIP میں اسٹیج 2 کے لیے بیان کردہ طریقہ کار کے بعد، نئے فائر ہوز کے نفاذ کو ٹیسٹ نیٹ پر جانچ کی ضرورت ہوگی۔ اسی طرح، یہ فرض کرتے ہوئے کہ نفاذ پرفارمنس اور قابل اعتماد ہے، [فیچر سپورٹ میٹرکس](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) پر ایک PR کی ضرورت ہوگی ( 'سب سٹریمز ڈیٹا سورسز' سب گراف فیچر)، نیز انڈیکسنگ انعامات کے لیے پروٹوکول سپورٹ کے لیے ایک نیا GIP۔ کوئی بھی PR اور GIP بنا سکتا ہے۔ فاؤنڈیشن کونسل کی منظوری میں مدد کرے گی. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. How much time will the process of reaching full protocol support take? 
From 6c4caeb2be057499b26d5bd672490951f91f9857 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:57 -0500 Subject: [PATCH 0379/1789] New translations chain-integration-overview.mdx (Vietnamese) --- website/src/pages/vi/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/vi/indexing/chain-integration-overview.mdx b/website/src/pages/vi/indexing/chain-integration-overview.mdx index 77141e82b34a..33619b03c483 100644 --- a/website/src/pages/vi/indexing/chain-integration-overview.mdx +++ b/website/src/pages/vi/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ This process is related to the Subgraph Data Service, applicable only to new Sub ### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? -This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. How much time will the process of reaching full protocol support take? From 84c03d984ce9b7c84c04b7b2d7b7c58b2207e537 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:58 -0500 Subject: [PATCH 0380/1789] New translations chain-integration-overview.mdx (Marathi) --- website/src/pages/mr/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/mr/indexing/chain-integration-overview.mdx b/website/src/pages/mr/indexing/chain-integration-overview.mdx index 77141e82b34a..33619b03c483 100644 --- a/website/src/pages/mr/indexing/chain-integration-overview.mdx +++ b/website/src/pages/mr/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ This process is related to the Subgraph Data Service, applicable only to new Sub ### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? -This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. 
Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. How much time will the process of reaching full protocol support take? From c3ca294dbc90ba32b75e0addd351ed6eef8fa60a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:14:59 -0500 Subject: [PATCH 0381/1789] New translations chain-integration-overview.mdx (Hindi) --- website/src/pages/hi/indexing/chain-integration-overview.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/hi/indexing/chain-integration-overview.mdx b/website/src/pages/hi/indexing/chain-integration-overview.mdx index 6a7c06a71a07..03cc897e3245 100644 --- a/website/src/pages/hi/indexing/chain-integration-overview.mdx +++ b/website/src/pages/hi/indexing/chain-integration-overview.mdx @@ -36,7 +36,7 @@ This process is related to the Subgraph Data Service, applicable only to new Sub ### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? -This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. ### 3. पूर्ण प्रोटोकॉल समर्थन तक पहुंचने की प्रक्रिया में कितना समय लगेगा? 
From 66c3ca406b4f31d90efa6cf46770471ad6cb626e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:00 -0500 Subject: [PATCH 0382/1789] New translations chain-integration-overview.mdx (Swahili) --- .../indexing/chain-integration-overview.mdx | 49 +++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 website/src/pages/sw/indexing/chain-integration-overview.mdx diff --git a/website/src/pages/sw/indexing/chain-integration-overview.mdx b/website/src/pages/sw/indexing/chain-integration-overview.mdx new file mode 100644 index 000000000000..33619b03c483 --- /dev/null +++ b/website/src/pages/sw/indexing/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Please visit [New Chain Integration](/indexing/new-chain-integration/) for information on `graph-node` support for new chains. +- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON-RPC, Firehose or Substreams endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). +- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? 
+ +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered Subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will the process of reaching full protocol support take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. + +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. From a771d8d13c2b3180e9fd98dd2222479c28991ba6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:01 -0500 Subject: [PATCH 0383/1789] New translations new-chain-integration.mdx (Romanian) --- .../pages/ro/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ro/indexing/new-chain-integration.mdx b/website/src/pages/ro/indexing/new-chain-integration.mdx index e45c4b411010..c401fa57b348 100644 --- a/website/src/pages/ro/indexing/new-chain-integration.mdx +++ b/website/src/pages/ro/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: New Chain Integration --- -Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. 
@@ -25,7 +25,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(limited tracing and optionally required for Graph Node)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration @@ -47,15 +47,15 @@ For EVM chains, there exists a deeper level of data that can be achieved through ## EVM considerations - Difference between JSON-RPC & Firehose -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Graph Node Configuration -Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Configuring Graph Node is as easy as preparing your local environment. Once your ## Substreams-powered Subgraphs -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. 
These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From b35aec91e9731124f1bf4a229efef0087a72c6bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:03 -0500 Subject: [PATCH 0384/1789] New translations new-chain-integration.mdx (French) --- .../pages/fr/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/fr/indexing/new-chain-integration.mdx b/website/src/pages/fr/indexing/new-chain-integration.mdx index b5b6fa8ccd73..20c9e5710b6a 100644 --- a/website/src/pages/fr/indexing/new-chain-integration.mdx +++ b/website/src/pages/fr/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: Intégration d'une Nouvelle Chaîne --- -Les chaînes peuvent apporter le support des subgraphs à leur écosystème en démarrant une nouvelle intégration `graph-node`. Les subgraphs sont un outil d'indexation puissant qui ouvre un monde de possibilités pour les développeurs. Graph Node indexe déjà les données des chaînes listées ici. Si vous êtes intéressé par une nouvelle intégration, il existe 2 stratégies d'intégration : +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose** : toutes les solutions d'intégration Firehose incluent Substreams, un moteur de streaming à grande échelle basé sur Firehose avec prise en charge native de `graph-node`, permettant des transformations parallélisées. @@ -25,7 +25,7 @@ Afin que Graph Node puisse ingérer des données provenant d'une chaîne EVM, le - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(traçage limité et optionnellement requis pour Graph Node)* +- `trace_filter` _(traçage limité et optionnellement requis pour Graph Node)_ ### 2. Intégration Firehose @@ -47,15 +47,15 @@ Pour les chaînes EVM, il existe un niveau de données plus approfondi qui peut ## Considérations sur EVM - Différence entre JSON-RPC et Firehose -Bien que le JSON-RPC et le Firehose soient tous deux adaptés aux subgraphs, un Firehose est toujours nécessaire pour les développeurs qui souhaitent construire avec [Substreams](https://substreams.streamingfast.io). La prise en charge de Substreams permet aux développeurs de construire des [subgraphs alimentés par Substreams](/subgraphs/cookbook/substreams-powered-subgraphs/) pour la nouvelle chaîne, et a le potentiel d'améliorer les performances de vos subgraphs. 
De plus, Firehose - en tant que remplacement direct de la couche d'extraction JSON-RPC de `graph-node` - réduit de 90% le nombre d'appels RPC requis pour l'indexation générale. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- Tous ces appels et allers-retours `getLogs` sont remplacés par un seul flux arrivant au cœur de `graph-node` ; un modèle de bloc unique pour tous les subgraphs qu'il traite. +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTEZ: une intégration basée sur Firehose pour les chaînes EVM nécessitera toujours que les indexeurs exécutent le nœud RPC d'archivage de la chaîne pour indexer correctement les subgraphs. Cela est dû à l'incapacité de Firehose à fournir un état de contrat intelligent généralement accessible par la méthode RPC `eth_calls`. (Il convient de rappeler que les `eth_call` ne sont pas une bonne pratique pour les développeurs) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Configuration Graph Node -La configuration de Graph Node est aussi simple que la préparation de votre environnement local. Une fois votre environnement local défini, vous pouvez tester l'intégration en déployant localement un subgraph. +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ La configuration de Graph Node est aussi simple que la préparation de votre env ## Subgraphs alimentés par des substreams -Pour les intégrations Firehose/Substreams pilotées par StreamingFast, la prise en charge de base des modules Substreams fondamentaux (par exemple, les transactions décodées, les logs et les événements smart-contract) et les outils codegen Substreams sont inclus. Ces outils permettent d'activer des [subgraphs alimentés par Substreams](/substreams/sps/introduction/). Suivez le [Guide pratique](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) et exécutez `substreams codegen subgraph` pour expérimenter les outils codegen par vous-même. +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). 
Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From a843fb28a4dd2ecb41289ed7d9aac5777ff0402f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:04 -0500 Subject: [PATCH 0385/1789] New translations new-chain-integration.mdx (Spanish) --- .../pages/es/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/es/indexing/new-chain-integration.mdx b/website/src/pages/es/indexing/new-chain-integration.mdx index 04aa90b6e5ae..7316741aa0e6 100644 --- a/website/src/pages/es/indexing/new-chain-integration.mdx +++ b/website/src/pages/es/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: New Chain Integration --- -Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. @@ -25,7 +25,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, en una solicitud por lotes JSON-RPC -- `trace_filter` *(limited tracing and optionally required for Graph Node)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration @@ -47,15 +47,15 @@ For EVM chains, there exists a deeper level of data that can be achieved through ## EVM considerations - Difference between JSON-RPC & Firehose -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. 
-- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Configuración del Graph Node -Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Configuring Graph Node is as easy as preparing your local environment. Once your ## Substreams-powered Subgraphs -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From 6727ba46a8dfabd6e546c0a679fec36173c27952 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:05 -0500 Subject: [PATCH 0386/1789] New translations new-chain-integration.mdx (Arabic) --- .../pages/ar/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ar/indexing/new-chain-integration.mdx b/website/src/pages/ar/indexing/new-chain-integration.mdx index bff012725d9d..bcd82dafed18 100644 --- a/website/src/pages/ar/indexing/new-chain-integration.mdx +++ b/website/src/pages/ar/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: New Chain Integration --- -Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. 
Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. @@ -25,7 +25,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`، ضمن طلب دفعة استدعاء الإجراء عن بُعد باستخدام تمثيل كائنات جافا سكريبت -- `trace_filter` *(limited tracing and optionally required for Graph Node)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration @@ -47,15 +47,15 @@ For EVM chains, there exists a deeper level of data that can be achieved through ## EVM considerations - Difference between JSON-RPC & Firehose -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## تكوين عقدة الغراف -Configuring Graph Node is as easy as preparing your local environment. 
Once your local environment is set, you can test the integration by locally deploying a subgraph. +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [استنسخ عقدة الغراف](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Configuring Graph Node is as easy as preparing your local environment. Once your ## Substreams-powered Subgraphs -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From ffde9c785f4c1d67eb9ff691d88ffc8e677a4bfc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:06 -0500 Subject: [PATCH 0387/1789] New translations new-chain-integration.mdx (Czech) --- .../pages/cs/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/cs/indexing/new-chain-integration.mdx b/website/src/pages/cs/indexing/new-chain-integration.mdx index 5eb78fc9efbd..2954c7f0b494 100644 --- a/website/src/pages/cs/indexing/new-chain-integration.mdx +++ b/website/src/pages/cs/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: New Chain Integration --- -Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. @@ -25,7 +25,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(limited tracing and optionally required for Graph Node)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. 
Firehose Integration @@ -47,15 +47,15 @@ For EVM chains, there exists a deeper level of data that can be achieved through ## EVM considerations - Difference between JSON-RPC & Firehose -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Config uzlu grafu -Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Configuring Graph Node is as easy as preparing your local environment. Once your ## Substreams-powered Subgraphs -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. 
+For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From 7d21fbf55516c43029f49f1bd1ff49c07ae25b37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:07 -0500 Subject: [PATCH 0388/1789] New translations new-chain-integration.mdx (German) --- .../pages/de/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/de/indexing/new-chain-integration.mdx b/website/src/pages/de/indexing/new-chain-integration.mdx index 54d9b95d5a24..c2edebc14be1 100644 --- a/website/src/pages/de/indexing/new-chain-integration.mdx +++ b/website/src/pages/de/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: Integration neuer Ketten --- -Ketten können die Unterstützung von Subgraphen in ihr Ökosystem einbringen, indem sie eine neue `graph-node` Integration starten. Subgraphen sind ein leistungsfähiges Indizierungswerkzeug, das Entwicklern eine Welt voller Möglichkeiten eröffnet. Graph Node indiziert bereits Daten von den hier aufgeführten Ketten. Wenn Sie an einer neuen Integration interessiert sind, gibt es 2 Integrationsstrategien: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: Alle Firehose-Integrationslösungen umfassen Substreams, eine groß angelegte Streaming-Engine auf der Grundlage von Firehose mit nativer `graph-node`-Unterstützung, die parallelisierte Transformationen ermöglicht. @@ -25,7 +25,7 @@ Damit Graph Node Daten aus einer EVM-Kette aufnehmen kann, muss der RPC-Knoten d - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in einem JSON-RPC-Batch-Antrag -- `trace_filter`  *(begrenztes Tracing und optional erforderlich für Graph Node)* +- `trace_filter`  _(begrenztes Tracing und optional erforderlich für Graph Node)_ ### 2. Firehose Integration @@ -47,15 +47,15 @@ Für EVM-Ketten gibt es eine tiefere Ebene von Daten, die durch den `geth` [Live ## EVM-Überlegungen - Unterschied zwischen JSON-RPC und Firehose -Während JSON-RPC und Firehose beide für Subgraphen geeignet sind, ist für Entwickler, die mit [Substreams](https://substreams.streamingfast.io) bauen wollen, immer ein Firehose erforderlich. Die Unterstützung von Substreams ermöglicht es Entwicklern, [Substreams-betriebene Subgraphen](/subgraphs/cookbook/substreams-powered-subgraphs/) für die neue Kette zu bauen, und hat das Potenzial, die Leistung Ihrer Subgraphen zu verbessern. Darüber hinaus reduziert Firehose - als Ersatz für die JSON-RPC-Extraktionsschicht von `graph-node` - die Anzahl der RPC-Aufrufe, die für die allgemeine Indizierung erforderlich sind, um 90%. 
+While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- All diese `getLogs`-Aufrufe und Roundtrips werden durch einen einzigen Stream ersetzt, der im Herzen von `graph-node` ankommt; ein einziges Blockmodell für alle Subgraphen, die es verarbeitet. +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> HINWEIS: Bei einer Firehose-basierten Integration für EVM-Ketten müssen Indexer weiterhin den Archiv-RPC-Knoten der Kette ausführen, um Subgraphen ordnungsgemäß zu indizieren. Dies liegt daran, dass der Firehose nicht in der Lage ist, den Smart-Contract-Status bereitzustellen, der normalerweise über die RPC-Methode „eth_call“ zugänglich ist. (Es ist erwähnenswert, dass `eth_calls` keine gute Praxis für Entwickler sind) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Graph-Node Konfiguration -Die Konfiguration von Graph Node ist so einfach wie die Vorbereitung Ihrer lokalen Umgebung. Sobald Ihre lokale Umgebung eingerichtet ist, können Sie die Integration testen, indem Sie einen Subgraphen lokal bereitstellen. +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [Graph Node klonen](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Die Konfiguration von Graph Node ist so einfach wie die Vorbereitung Ihrer lokal ## Substreams-getriebene Subgraphen -Für StreamingFast-geführte Firehose/Substreams-Integrationen sind grundlegende Unterstützung für grundlegende Substreams-Module (z. B. entschlüsselte Transaktionen, Protokolle und Smart-Contract-Ereignisse) und Substreams-Codegen-Tools enthalten. Mit diesen Tools können Sie [Substreams-getriebene Subgraphen](/substreams/sps/introduction/) aktivieren. Folgen Sie dem [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) und führen Sie `substreams codegen subgraph` aus, um die codegen-Tools selbst zu erleben. +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. 
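The JSON-RPC requirements listed in the patch above can be sanity-checked before wiring anything into `graph-node`. A minimal sketch with `curl`, assuming a placeholder endpoint and placeholder block/transaction hashes for the chain being integrated:

```bash
# Placeholder RPC endpoint for the chain under integration.
RPC_URL="http://localhost:8545"

# net_version — basic connectivity / network id check.
curl -s "$RPC_URL" -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"net_version","params":[]}'

# eth_getBlockByHash — Graph Node fetches full blocks by hash while indexing.
curl -s "$RPC_URL" -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":2,"method":"eth_getBlockByHash","params":["0x<block-hash>",false]}'

# eth_getLogs — filtered log extraction over a small block range.
curl -s "$RPC_URL" -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":3,"method":"eth_getLogs","params":[{"fromBlock":"0x1","toBlock":"0x10"}]}'

# eth_getTransactionReceipt — issued as a JSON-RPC batch (an array of requests).
curl -s "$RPC_URL" -H 'Content-Type: application/json' \
  -d '[{"jsonrpc":"2.0","id":4,"method":"eth_getTransactionReceipt","params":["0x<tx-hash-1>"]},
       {"jsonrpc":"2.0","id":5,"method":"eth_getTransactionReceipt","params":["0x<tx-hash-2>"]}]'
```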
From a9c38c03df2f370d36a1a9f1988df6b7d89eb4de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:08 -0500 Subject: [PATCH 0389/1789] New translations new-chain-integration.mdx (Italian) --- .../pages/it/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/it/indexing/new-chain-integration.mdx b/website/src/pages/it/indexing/new-chain-integration.mdx index e45c4b411010..c401fa57b348 100644 --- a/website/src/pages/it/indexing/new-chain-integration.mdx +++ b/website/src/pages/it/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: New Chain Integration --- -Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. @@ -25,7 +25,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(limited tracing and optionally required for Graph Node)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration @@ -47,15 +47,15 @@ For EVM chains, there exists a deeper level of data that can be achieved through ## EVM considerations - Difference between JSON-RPC & Firehose -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. 
+- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Graph Node Configuration -Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Configuring Graph Node is as easy as preparing your local environment. Once your ## Substreams-powered Subgraphs -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From e0830ae4292d619fe51aa2e1591dbdc0256af6d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:09 -0500 Subject: [PATCH 0390/1789] New translations new-chain-integration.mdx (Japanese) --- .../pages/ja/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ja/indexing/new-chain-integration.mdx b/website/src/pages/ja/indexing/new-chain-integration.mdx index decdf0266d65..dc9408b25f69 100644 --- a/website/src/pages/ja/indexing/new-chain-integration.mdx +++ b/website/src/pages/ja/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: New Chain Integration --- -Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. 
If you are interested in a new integration, there are 2 integration strategies: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. @@ -25,7 +25,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(limited tracing and optionally required for Graph Node)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration @@ -47,15 +47,15 @@ For EVM chains, there exists a deeper level of data that can be achieved through ## EVM considerations - Difference between JSON-RPC & Firehose -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Graph Node の設定 -Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. 
+Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Configuring Graph Node is as easy as preparing your local environment. Once your ## Substreams-powered Subgraphs -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From 5fcb6a8f154f1192a9551df03c74180d020d2250 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:10 -0500 Subject: [PATCH 0391/1789] New translations new-chain-integration.mdx (Korean) --- .../pages/ko/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ko/indexing/new-chain-integration.mdx b/website/src/pages/ko/indexing/new-chain-integration.mdx index e45c4b411010..c401fa57b348 100644 --- a/website/src/pages/ko/indexing/new-chain-integration.mdx +++ b/website/src/pages/ko/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: New Chain Integration --- -Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. @@ -25,7 +25,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(limited tracing and optionally required for Graph Node)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. 
Firehose Integration @@ -47,15 +47,15 @@ For EVM chains, there exists a deeper level of data that can be achieved through ## EVM considerations - Difference between JSON-RPC & Firehose -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Graph Node Configuration -Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Configuring Graph Node is as easy as preparing your local environment. Once your ## Substreams-powered Subgraphs -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. 
+For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From 8db3d3296bcd53589cdcaccffe9902f5c1318e96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:11 -0500 Subject: [PATCH 0392/1789] New translations new-chain-integration.mdx (Dutch) --- .../pages/nl/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/nl/indexing/new-chain-integration.mdx b/website/src/pages/nl/indexing/new-chain-integration.mdx index e45c4b411010..c401fa57b348 100644 --- a/website/src/pages/nl/indexing/new-chain-integration.mdx +++ b/website/src/pages/nl/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: New Chain Integration --- -Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. @@ -25,7 +25,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(limited tracing and optionally required for Graph Node)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration @@ -47,15 +47,15 @@ For EVM chains, there exists a deeper level of data that can be achieved through ## EVM considerations - Difference between JSON-RPC & Firehose -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). 
Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Graph Node Configuration -Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Configuring Graph Node is as easy as preparing your local environment. Once your ## Substreams-powered Subgraphs -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. 
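For the Graph Node configuration steps referenced in these files, a rough sketch of a local EVM JSON-RPC setup — the network name, Postgres credentials, and RPC URL are placeholders, and a local Postgres and IPFS node are assumed to already be running:

```bash
# 1. Clone and build Graph Node.
git clone https://github.com/graphprotocol/graph-node
cd graph-node

# 2. Start Graph Node against local Postgres, local IPFS, and the chain's RPC endpoint.
#    "mynetwork" and the connection strings below are placeholder values.
cargo run -p graph-node --release -- \
  --postgres-url postgresql://graph:graph@localhost:5432/graph-node \
  --ethereum-rpc mynetwork:http://localhost:8545 \
  --ipfs 127.0.0.1:5001
```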
From c76d77834f1a00c8333cf5af3d46c720dc94e35a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:12 -0500 Subject: [PATCH 0393/1789] New translations new-chain-integration.mdx (Polish) --- .../pages/pl/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/pl/indexing/new-chain-integration.mdx b/website/src/pages/pl/indexing/new-chain-integration.mdx index e45c4b411010..c401fa57b348 100644 --- a/website/src/pages/pl/indexing/new-chain-integration.mdx +++ b/website/src/pages/pl/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: New Chain Integration --- -Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. @@ -25,7 +25,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(limited tracing and optionally required for Graph Node)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration @@ -47,15 +47,15 @@ For EVM chains, there exists a deeper level of data that can be achieved through ## EVM considerations - Difference between JSON-RPC & Firehose -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. 
+- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Graph Node Configuration -Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Configuring Graph Node is as easy as preparing your local environment. Once your ## Substreams-powered Subgraphs -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From 19b7e5d615d4371de3308b0bb18d9df15d6280f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:13 -0500 Subject: [PATCH 0394/1789] New translations new-chain-integration.mdx (Portuguese) --- .../pages/pt/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/pt/indexing/new-chain-integration.mdx b/website/src/pages/pt/indexing/new-chain-integration.mdx index 388561fac3d7..12ca29bf11c2 100644 --- a/website/src/pages/pt/indexing/new-chain-integration.mdx +++ b/website/src/pages/pt/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: Integração de Chains Novas --- -Chains podem trazer apoio a subgraphs para os seus ecossistemas ao iniciar uma nova integração de `graph-node`. Subgraphs são ferramentas poderosas de indexação que abrem infinitas possibilidades a programadores. O Graph Node já indexa dados das chains listadas aqui. 
Caso tenha interesse numa nova integração, há 2 estratégias para ela: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: Todas as soluções de integração do Firehose incluem Substreams, um motor de transmissão de grande escala com base no Firehose com apoio nativo ao `graph-node`, o que permite transformações paralelizadas. @@ -25,7 +25,7 @@ Para que o Graph Node possa ingerir dados de uma chain EVM, o node RPC deve expo - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, em um pedido conjunto em JSON-RPC -- `trace_filter` *(tracing limitado, e opcionalmente necessário, para o Graph Node)* +- `trace_filter` _(tracing limitado, e opcionalmente necessário, para o Graph Node)_ ### 2. Integração do Firehose @@ -47,15 +47,15 @@ Para chains EVM, há um nível mais profundo de dados que podem ser alcançados ## Considerações de EVM - Diferença entre JSON-RPC e Firehose -Enquanto ambos o JSON-RPC e o Firehose são próprios para subgraphs, um Firehose é sempre necessário para programadores que querem construir com [Substreams](https://substreams.streamingfast.io). Apoiar o Substreams permite que programadores construam [subgraphs movidos a Substreams](/subgraphs/cookbook/substreams-powered-subgraphs/) para a nova chain, e tem o potencial de melhorar o desempenho dos seus subgraphs. Além disto, o Firehose — como um substituto pronto para a camada de extração JSON-RPC do `graph-node` — reduz em 90% o número de RPCs (chamadas de procedimento remoto) exigidas para indexação geral. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- Todas essas chamadas `getLogs` e roundtrips são substituídas por um único fluxo que chega no coração do `graph-node`, um modelo de bloco único para todos os subgraphs que processa. +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTA: Uma integração baseada no Firehose para chains EVM ainda exigirá que os Indexadores executem o node RPC de arquivo da chain para indexar subgraphs corretamente. Isto é porque o Firehose não pode fornecer estados de contratos inteligentes que são tipicamente acessíveis pelo método RPC  `eth_call` . (Vale lembrar que eth_calls não são uma boa prática para programadores) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. 
(It's worth reminding that `eth_calls` are not a good practice for developers) ## Como Configurar um Graph Node -Configurar um Graph Node é tão fácil quanto preparar o seu ambiente local. Quando o seu ambiente local estiver pronto, será possível testar a integração com a edição local de um subgraph. +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [Clone o Graph Node](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Configurar um Graph Node é tão fácil quanto preparar o seu ambiente local. Qu ## Subgraphs movidos por Substreams -Para integrações do Substreams ou Firehose movidas ao StreamingFast, são inclusos: apoio básico a módulos do Substreams (por exemplo: transações, logs, e eventos de contrato inteligente decodificados); e ferramentas de geração de código do Substreams. Estas ferramentas permitem a habilidade de ativar [subgraphs movidos pelo Substreams](/substreams/sps/introduction/). Siga o [Passo-a-Passo](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) e execute `substreams codegen subgraph` para sentir um gostinho das ferramentas. +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From edbf1902483c20ec65e2b04cd60f7ada9e368fce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:14 -0500 Subject: [PATCH 0395/1789] New translations new-chain-integration.mdx (Russian) --- .../src/pages/ru/indexing/new-chain-integration.mdx | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/website/src/pages/ru/indexing/new-chain-integration.mdx b/website/src/pages/ru/indexing/new-chain-integration.mdx index 427169610d41..8b23af33ebd1 100644 --- a/website/src/pages/ru/indexing/new-chain-integration.mdx +++ b/website/src/pages/ru/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: Интеграция новых чейнов --- -Чейны могут обеспечить поддержку субграфов в своей экосистеме, начав новую интеграцию `graph-node`. Субграфы — это мощный инструмент индексирования, открывающий перед разработчиками целый мир возможностей. Graph Node уже индексирует данные из перечисленных здесь чейнов. Если Вы заинтересованы в новой интеграции, для этого существуют 2 стратегии: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: все решения по интеграции Firehose включают Substreams, крупномасштабный механизм потоковой передачи на базе Firehose со встроенной поддержкой `graph-node`, позволяющий выполнять распараллеленные преобразования. 
@@ -47,15 +47,15 @@ title: Интеграция новых чейнов ## Рекомендации по EVM — разница между JSON-RPC & Firehose -Хотя как JSON-RPC, так и Firehose оба подходят для субграфов, Firehose всегда востребован разработчиками, желающими создавать с помощью [Substreams](https://substreams.streamingfast.io). Поддержка Substreams позволяет разработчикам создавать [субграфы на основе субпотоков](/subgraphs/cookbook/substreams-powered-subgraphs/) для нового чейна и потенциально может повысить производительность Ваших субграфов. Кроме того, Firehose — в качестве замены уровня извлечения JSON-RPC `graph-node` — сокращает на 90 % количество вызовов RPC, необходимых для общего индексирования. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- Все эти вызовы `getLogs` и циклические передачи заменяются единым потоком, поступающим в сердце `graph-node`; единой блочной моделью для всех обрабатываемых ею субграфов. +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> ПРИМЕЧАНИЕ: Интеграция на основе Firehose для чейнов EVM по-прежнему будет требовать от Индексаторов запуска ноды архива RPC чейна для правильного индексирования субрафов. Это происходит из-за неспособности Firehose предоставить состояние смарт-контракта, обычно доступное с помощью метода RPC `eth_call`. (Стоит напомнить, что `eth_calls` не является хорошей практикой для разработчиков) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Конфигурация Graph Node -Настроить Graph Node так же просто, как подготовить локальную среду. После того, как Ваша локальная среда настроена, Вы можете протестировать интеграцию, локально развернув субграф. +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [Клонировать Graph Node](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ title: Интеграция новых чейнов ## Субграфы, работающие на основе субпотоков (Substreams) -Для интеграции Firehose/Substreams под управлением StreamingFast включена базовая поддержка фундаментальных модулей Substreams (например, декодированные транзакции, логи и события смарт-контрактов) и инструментов генерации кодов Substreams. Эти инструменты позволяют включать [субграфы на базе субпотоков](/substreams/sps/introduction/). Следуйте [Практическому руководству] (https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) и запустите `substreams codegen subgraph`, чтобы самостоятельно испробовать инструменты кодирования. 
+For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From 371b244170a24552972a4e6d9e73b4682011c760 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:15 -0500 Subject: [PATCH 0396/1789] New translations new-chain-integration.mdx (Swedish) --- .../pages/sv/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/sv/indexing/new-chain-integration.mdx b/website/src/pages/sv/indexing/new-chain-integration.mdx index c33a501eb77f..504940f98a6b 100644 --- a/website/src/pages/sv/indexing/new-chain-integration.mdx +++ b/website/src/pages/sv/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: New Chain Integration --- -Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. @@ -25,7 +25,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, i en JSON-RPC batch-begäran -- `trace_filter` *(limited tracing and optionally required for Graph Node)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration @@ -47,15 +47,15 @@ For EVM chains, there exists a deeper level of data that can be achieved through ## EVM considerations - Difference between JSON-RPC & Firehose -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). 
Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Graf Node-konfiguration -Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [Klona Graf Node](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Configuring Graph Node is as easy as preparing your local environment. Once your ## Substreams-powered Subgraphs -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. 
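Once a local Graph Node is up, the integration test described in these files is a local Subgraph deployment. A sketch using the Graph CLI with assumed default ports (the Subgraph name is a placeholder), followed by the Substreams codegen command named in the How-To Guide:

```bash
# Register and deploy a test Subgraph against the local Graph Node
# (admin JSON-RPC on :8020 and IPFS API on :5001 are assumed defaults;
#  run from a Subgraph project directory containing subgraph.yaml).
graph create --node http://localhost:8020/ example/test-subgraph
graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 example/test-subgraph

# For a Firehose/Substreams-led integration, try the Substreams codegen tooling.
substreams codegen subgraph
```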
From 07d46528df9e0985908fe3b394bb7c46cf16f85e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:16 -0500 Subject: [PATCH 0397/1789] New translations new-chain-integration.mdx (Turkish) --- .../pages/tr/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/tr/indexing/new-chain-integration.mdx b/website/src/pages/tr/indexing/new-chain-integration.mdx index 5eb41f1d922d..8bd9b8b5b773 100644 --- a/website/src/pages/tr/indexing/new-chain-integration.mdx +++ b/website/src/pages/tr/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: New Chain Integration --- -Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. @@ -25,7 +25,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, bir JSON-RPC toplu talebinde -- `trace_filter` *(limited tracing and optionally required for Graph Node)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration @@ -47,15 +47,15 @@ For EVM chains, there exists a deeper level of data that can be achieved through ## EVM considerations - Difference between JSON-RPC & Firehose -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. 
+- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Graph Node Configuration -Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [Graph Düğümü'nü Klonlayın](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Configuring Graph Node is as easy as preparing your local environment. Once your ## Substreams-powered Subgraphs -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From 2ee4ba581ec7220e52c8a04714475ef7ebf66c66 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:17 -0500 Subject: [PATCH 0398/1789] New translations new-chain-integration.mdx (Ukrainian) --- .../pages/uk/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/uk/indexing/new-chain-integration.mdx b/website/src/pages/uk/indexing/new-chain-integration.mdx index e45c4b411010..c401fa57b348 100644 --- a/website/src/pages/uk/indexing/new-chain-integration.mdx +++ b/website/src/pages/uk/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: New Chain Integration --- -Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. 
If you are interested in a new integration, there are 2 integration strategies: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. @@ -25,7 +25,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(limited tracing and optionally required for Graph Node)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration @@ -47,15 +47,15 @@ For EVM chains, there exists a deeper level of data that can be achieved through ## EVM considerations - Difference between JSON-RPC & Firehose -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Graph Node Configuration -Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. 
+Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Configuring Graph Node is as easy as preparing your local environment. Once your ## Substreams-powered Subgraphs -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From 90b7a817fd49c60a2be6dbbef8a02e6ad49a5e2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:18 -0500 Subject: [PATCH 0399/1789] New translations new-chain-integration.mdx (Chinese Simplified) --- .../pages/zh/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/zh/indexing/new-chain-integration.mdx b/website/src/pages/zh/indexing/new-chain-integration.mdx index cb717c36d646..cb0348d01a6e 100644 --- a/website/src/pages/zh/indexing/new-chain-integration.mdx +++ b/website/src/pages/zh/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: New Chain Integration --- -Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. @@ -25,7 +25,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(limited tracing and optionally required for Graph Node)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. 
Firehose Integration @@ -47,15 +47,15 @@ For EVM chains, there exists a deeper level of data that can be achieved through ## EVM considerations - Difference between JSON-RPC & Firehose -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Graph节点配置 -Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [克隆Graph节点](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Configuring Graph Node is as easy as preparing your local environment. Once your ## Substreams驱动的子图 -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. 
decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From 780a4af19a40b104a4c1c799c8081f0b5454bf93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:19 -0500 Subject: [PATCH 0400/1789] New translations new-chain-integration.mdx (Urdu (Pakistan)) --- .../pages/ur/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ur/indexing/new-chain-integration.mdx b/website/src/pages/ur/indexing/new-chain-integration.mdx index fc630546433a..e4d231c67e18 100644 --- a/website/src/pages/ur/indexing/new-chain-integration.mdx +++ b/website/src/pages/ur/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: New Chain Integration --- -Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. @@ -25,7 +25,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(limited tracing and optionally required for Graph Node)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration @@ -47,15 +47,15 @@ For EVM chains, there exists a deeper level of data that can be achieved through ## EVM considerations - Difference between JSON-RPC & Firehose -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. 
Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## گراف نوڈ کنفگریشن -Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [گراف نوڈ کی نقل بنائیں](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Configuring Graph Node is as easy as preparing your local environment. Once your ## Substreams-powered Subgraphs -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. 
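
A quick way to sanity-check the JSON-RPC surface listed in the integration guide above is to probe the required methods directly. The sketch below is illustrative only: it assumes a local archive node at `http://localhost:8545`, and the transaction hash, contract address, and calldata are placeholders rather than values taken from the docs.

```bash
# Probe the EVM JSON-RPC methods Graph Node relies on.
# The RPC endpoint is an assumption; replace the placeholders before running.
RPC=http://localhost:8545

# net_version and eth_getBlockByNumber
curl -s "$RPC" -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"net_version","params":[]}'
curl -s "$RPC" -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":2,"method":"eth_getBlockByNumber","params":["latest",false]}'

# eth_getTransactionReceipt inside a JSON-RPC batch request (note the array body)
curl -s "$RPC" -H 'Content-Type: application/json' \
  -d '[{"jsonrpc":"2.0","id":3,"method":"eth_getTransactionReceipt","params":["0x<tx-hash>"]}]'

# eth_call pinned to a historical block via an EIP-1898 block object;
# only an archive node can answer this for blocks far in the past.
curl -s "$RPC" -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":4,"method":"eth_call","params":[{"to":"0x<contract>","data":"0x<calldata>"},{"blockNumber":"0x1"}]}'
```
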
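As a concrete companion to the Graph Node configuration steps, the following is a minimal local test loop rather than a definitive setup: it assumes Docker is installed, `newchain` is a placeholder network name, and the exact `docker-compose.yml` layout may differ between `graph-node` versions.

```bash
# Clone Graph Node and run its Docker stack against the new chain.
git clone https://github.com/graphprotocol/graph-node
cd graph-node/docker

# In docker-compose.yml, point the graph-node service at the new network, e.g.:
#   ethereum: 'newchain:http://host.docker.internal:8545'
# Keep the env var name `ethereum` even though the network name differs.

docker compose up

# Once graph-node, IPFS, and Postgres are up, a Subgraph can be created and
# deployed against the local admin endpoint with graph-cli (names are placeholders):
#   graph create --node http://localhost:8020/ test/my-subgraph
#   graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 test/my-subgraph
```

If a locally deployed Subgraph syncs past its start block against this setup, the endpoint exposes everything `graph-node` needs for the integration.
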
From a8cb8bff7d0aaa3dde1a5b85362463c0e84ddb12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:20 -0500 Subject: [PATCH 0401/1789] New translations new-chain-integration.mdx (Vietnamese) --- .../pages/vi/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/vi/indexing/new-chain-integration.mdx b/website/src/pages/vi/indexing/new-chain-integration.mdx index e45c4b411010..c401fa57b348 100644 --- a/website/src/pages/vi/indexing/new-chain-integration.mdx +++ b/website/src/pages/vi/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: New Chain Integration --- -Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. @@ -25,7 +25,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(limited tracing and optionally required for Graph Node)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration @@ -47,15 +47,15 @@ For EVM chains, there exists a deeper level of data that can be achieved through ## EVM considerations - Difference between JSON-RPC & Firehose -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. 
+- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Graph Node Configuration -Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Configuring Graph Node is as easy as preparing your local environment. Once your ## Substreams-powered Subgraphs -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From ad14c93e6714c61cff75f47cb4fa09c6ed0a25d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:21 -0500 Subject: [PATCH 0402/1789] New translations new-chain-integration.mdx (Marathi) --- .../pages/mr/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/mr/indexing/new-chain-integration.mdx b/website/src/pages/mr/indexing/new-chain-integration.mdx index e45c4b411010..c401fa57b348 100644 --- a/website/src/pages/mr/indexing/new-chain-integration.mdx +++ b/website/src/pages/mr/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: New Chain Integration --- -Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. 
If you are interested in a new integration, there are 2 integration strategies: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. **EVM JSON-RPC** 2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. @@ -25,7 +25,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(limited tracing and optionally required for Graph Node)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration @@ -47,15 +47,15 @@ For EVM chains, there exists a deeper level of data that can be achieved through ## EVM considerations - Difference between JSON-RPC & Firehose -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Graph Node Configuration -Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. 
+Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ Configuring Graph Node is as easy as preparing your local environment. Once your ## Substreams-powered Subgraphs -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From 64243f37a608739a36c1c9a9eea67dd010857555 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:22 -0500 Subject: [PATCH 0403/1789] New translations new-chain-integration.mdx (Hindi) --- .../pages/hi/indexing/new-chain-integration.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/hi/indexing/new-chain-integration.mdx b/website/src/pages/hi/indexing/new-chain-integration.mdx index 0cb393914982..15be8aeb883d 100644 --- a/website/src/pages/hi/indexing/new-chain-integration.mdx +++ b/website/src/pages/hi/indexing/new-chain-integration.mdx @@ -2,7 +2,7 @@ title: नई श्रृंखला एकीकरण --- -चेन अपने पारिस्थितिकी तंत्र में सबग्राफ़ समर्थन लाने के लिए एक नया `graph-node` एकीकरण शुरू कर सकती हैं। सबग्राफ़ एक शक्तिशाली इंडेक्सिंग उपकरण हैं, जो डेवलपर्स के लिए संभावनाओं की एक नई दुनिया खोलते हैं। ग्राफ़ नोड पहले से ही यहाँ सूचीबद्ध चेन से डेटा को इंडेक्स करता है। यदि आप नए एकीकरण में रुचि रखते हैं, तो दो एकीकरण रणनीतियाँ हैं: +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: 1. EVM JSON-RPC 2. Firehose: सभी Firehose एकीकरण समाधान में Substreams शामिल हैं, जो Firehose पर आधारित एक बड़े पैमाने पर स्ट्रीमिंग इंजन है, जिसमें स्वदेशी `graph-node` समर्थन है, जो समानांतर रूपांतरण की अनुमति देता है। @@ -25,7 +25,7 @@ Graph Node को EVM चेन से डेटा इन्गेस्ट क - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(सीमित ट्रेसिंग और विकल्पतः Graph Node के लिए आवश्यक)* +- `trace_filter` _(सीमित ट्रेसिंग और विकल्पतः Graph Node के लिए आवश्यक)_ ### 2. 
Firehose एकीकरण @@ -47,15 +47,15 @@ EVM चेन के लिए, एक गहरे स्तर के डे ## EVM विचार - JSON-RPC और Firehose के बीच का अंतर -JSON-RPC और Firehose दोनों ही सबग्राफ के लिए उपयुक्त हैं, लेकिन एक Firehose हमेशा आवश्यक होता है यदि डेवलपर्स [सबस्ट्रीम](https://substreams.streamingfast.io) के साथ निर्माण करना चाहते हैं। सबस्ट्रीम का समर्थन करने से डेवलपर्स को नए chain के लिए [सबस्ट्रीम-powered सबग्राफ](/subgraphs/cookbook/substreams-powered-subgraphs/) बनाने की अनुमति मिलती है, और इसके परिणामस्वरूप आपके सबग्राफ की प्रदर्शन क्षमता में सुधार हो सकता है। इसके अतिरिक्त, Firehose — जो कि `ग्राफ-नोड` के JSON-RPC extraction layer का एक drop-in replacement है — सामान्य indexing के लिए आवश्यक RPC कॉल्स की संख्या को 90% तक घटा देता है। +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. -- सभी `getLogs` कॉल्स और राउंडट्रिप्स को एकल स्ट्रीम द्वारा प्रतिस्थापित किया जाता है, जो सीधे `graph-node` के केंद्र में पहुंचती है; यह एकल ब्लॉक मॉडल सभी सबग्राफ्स के लिए काम करता है जिन्हें यह प्रोसेस करता है। +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. -> **NOTE**: EVM chains के लिए Firehose-based integration के लिए अभी भी Indexers को chain के संग्रह RPC node को subgraph को ठीक से index करने के लिए चलाने की आवश्यकता होगी। यह `eth_call` RPC विधि द्वारा आम तौर पर पहुंच योग्य smart contract स्थिति प्रदान करने में Firehosesकी असमर्थता के कारण है। (It's worth reminding that eth_calls are [not a good practice for developers](/)) +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) ## Graph Node Configuration -ग्राफ नोड को कॉन्फ़िगर करना आपके स्थानीय वातावरण को तैयार करने के समान आसान है। एक बार जब आपका स्थानीय वातावरण सेट हो जाता है, तो आप एक उपग्राफ को स्थानीय रूप से डिप्लॉय करके एकीकरण का परीक्षण कर सकते हैं। +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. 1. 
[Clone Graph Node](https://github.com/graphprotocol/graph-node) @@ -67,4 +67,4 @@ JSON-RPC और Firehose दोनों ही सबग्राफ के ल ## सबस्ट्रीम-संचालित सबग्राफ की सेवा -StreamingFast द्वारा संचालित Firehose/सबस्ट्रीम इंटीग्रेशन के लिए, बुनियादी सबस्ट्रीम मॉड्यूल (जैसे डिकोड किए गए लेनदेन, log और स्मार्ट-contract आयोजन) और सबस्ट्रीम कोडजेन टूल्स का बेसिक सपोर्ट शामिल है। ये टूल्स [सबस्ट्रीम-powered सबग्राफ](/substreams/sps/introduction/) को सक्षम बनाने की क्षमता प्रदान करते हैं। [ मार्गदर्शक](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) का अनुसरण करें और `सबस्ट्रीम codegen सबग्राफ` चलाकर कोडजेन टूल्स का अनुभव लें। +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From 1967ee95152f6d16e8ca0fcefa86df665ab76086 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:23 -0500 Subject: [PATCH 0404/1789] New translations new-chain-integration.mdx (Swahili) --- .../sw/indexing/new-chain-integration.mdx | 70 +++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 website/src/pages/sw/indexing/new-chain-integration.mdx diff --git a/website/src/pages/sw/indexing/new-chain-integration.mdx b/website/src/pages/sw/indexing/new-chain-integration.mdx new file mode 100644 index 000000000000..c401fa57b348 --- /dev/null +++ b/website/src/pages/sw/indexing/new-chain-integration.mdx @@ -0,0 +1,70 @@ +--- +title: New Chain Integration +--- + +Chains can bring Subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: + +1. **EVM JSON-RPC** +2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. + +> Note that while the recommended approach is to develop a new Firehose for all new chains, it is only required for non-EVM chains. + +## Integration Strategies + +### 1. EVM JSON-RPC + +If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. + +#### Testing an EVM JSON-RPC + +For Graph Node to be able to ingest data from an EVM chain, the RPC node must expose the following EVM JSON-RPC methods: + +- `eth_getLogs` +- `eth_call` (for historical blocks, with EIP-1898 - requires archive node) +- `eth_getBlockByNumber` +- `eth_getBlockByHash` +- `net_version` +- `eth_getTransactionReceipt`, in a JSON-RPC batch request +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ + +### 2. Firehose Integration + +[Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. 
Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing. + +> NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be. + +#### Integration for Non-EVM chains + +The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. + +#### Specific Instrumentation for EVM (`geth`) chains + +For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract. + +![Base block vs Extended block](/img/extended-vs-base-substreams-blocks.png) + +> NOTE: This improvement upon the Firehose requires chains make use of the EVM engine `geth version 1.13.0` and up. + +## EVM considerations - Difference between JSON-RPC & Firehose + +While the JSON-RPC and Firehose are both suitable for Subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered Subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your Subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. + +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all Subgraphs it processes. + +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index Subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. 
(It's worth reminding that `eth_calls` are not a good practice for developers) + +## Graph Node Configuration + +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a Subgraph. + +1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) + +2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL + + > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. + +3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ + +## Substreams-powered Subgraphs + +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered Subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From e44f517135f3df3d3b575dda7d191adfc8847150 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:24 -0500 Subject: [PATCH 0405/1789] New translations supported-network-requirements.mdx (Romanian) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/ro/indexing/supported-network-requirements.mdx b/website/src/pages/ro/indexing/supported-network-requirements.mdx index df15ef48d762..afbf755c0a5a 100644 --- a/website/src/pages/ro/indexing/supported-network-requirements.mdx +++ b/website/src/pages/ro/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Network | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preferred)
_last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | +| Network | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preferred)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | From 3a7259eac64dc0ba014db74f1b724546ea871a98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:25 -0500 Subject: [PATCH 0406/1789] New translations supported-network-requirements.mdx (French) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/fr/indexing/supported-network-requirements.mdx b/website/src/pages/fr/indexing/supported-network-requirements.mdx index 799fd25b8136..e22b3896b253 100644 --- a/website/src/pages/fr/indexing/supported-network-requirements.mdx +++ b/website/src/pages/fr/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Exigences du réseau pris en charge --- -| Réseau | Guides | Configuration requise | Récompenses d'indexation | -| --- | --- | --- | :-: | -| Arbitrum | [Guide Baremetal ](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Guide Docker ](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | CPU 4+ coeurs
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_dernière mise à jour août 2023_ | ✅ | -| Avalanche | [Guide Docker](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | CPU 4 cœurs / 8 threads
Ubuntu 22.04
16Go+ RAM
>= 5 Tio NVMe SSD
_dernière mise à jour août 2023_ | ✅ | -| Base | [Guide Erigon Baremetal ](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[Guide GETH Baremetal ](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[Guide GETH Docker ](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | CPU 8+ cœurs
Debian 12/Ubuntu 22.04
16 Go RAM
>= 4.5To (NVME recommandé)
_Dernière mise à jour le 14 mai 2024_ | ✅ | -| Binance | [Guide Erigon Baremetal ](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | CPU 8 cœurs / 16 threads
Ubuntu 22.04
>=32 Go RAM
>= 14 Tio NVMe SSD
_Dernière mise à jour le 22 juin 2024_ | ✅ | -| Celo | [Guide Docker](https://docs.infradao.com/archive-nodes-101/celo/docker) | CPU 4 cœurs / 8 threads
Ubuntu 22.04
16Go+ RAM
>= 2 Tio NVMe SSD
_Dernière mise à jour en août 2023_ | ✅ | -| Ethereum | [Guide Docker](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Vitesse d'horloge supérieure par rapport au nombre de cœurs
Ubuntu 22.04
16 Go+ RAM
>=3 To (NVMe recommandé)
_dernière mise à jour août 2023_ | ✅ | -| Fantom | [Guide Docker](https://docs.infradao.com/archive-nodes-101/fantom/docker) | CPU 4 cœurs / 8 threads
Ubuntu 22.04
16 Go + RAM
>= 13 Tio SSD NVMe
_dernière mise à jour août 2023_ | ✅ | -| Gnosis | [Guide Baremetal](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | CPU 6 cœurs / 12 threads
Ubuntu 22.04
16 Go+ RAM
>= 3 To SSD NVMe
_dernière mise à jour août 2023_ | ✅ | -| Linea | [Guide Baremetal ](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | CPU 4+ cœurs
Ubuntu 22.04
16 Go+ RAM
>= 1 To SSD NVMe
_dernière mise à jour le 2 avril 2024_ | ✅ | -| Optimism | [Guide Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[Guide GETH Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[Guide GETH Docker](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | CPU 4 cœurs / 8 threads
Ubuntu 22.04
16 Go + RAM
>= SSD NVMe 8 Tio
_dernière mise à jour août 2023_ | ✅ | -| Polygon | [Guide Docker](https://docs.infradao.com/archive-nodes-101/polygon/docker) | CPU 16 cœurs
Ubuntu 22.04
32 Go+ RAM
>= 10 Tio NVMe SSD
_dernière mise à jour août 2023_ | ✅ | -| Scroll | [Guide Baremetal](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Guide Docker](https://docs.infradao.com/archive-nodes-101/scroll/docker) | CPU 4 cœurs / 8 threads
Debian 12
16 Go + RAM
>= 1 Tio NVMe SSD
_dernière mise à jour le 3 avril 2024_ | ✅ | +| Réseau | Guides | Configuration requise | Récompenses d'indexation | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :----------------------: | +| Arbitrum | [Guide Baremetal ](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Guide Docker ](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | CPU 4+ coeurs
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_dernière mise à jour août 2023_ | ✅ | +| Avalanche | [Guide Docker](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | CPU 4 cœurs / 8 threads
Ubuntu 22.04
16Go+ RAM
>= 5 Tio NVMe SSD
_dernière mise à jour août 2023_ | ✅ | +| Base | [Guide Erigon Baremetal ](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[Guide GETH Baremetal ](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[Guide GETH Docker ](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | CPU 8+ cœurs
Debian 12/Ubuntu 22.04
16 Go RAM
>= 4.5To (NVME recommandé)
_Dernière mise à jour le 14 mai 2024_ | ✅ | +| Binance | [Guide Erigon Baremetal ](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | CPU 8 cœurs / 16 threads
Ubuntu 22.04
>=32 Go RAM
>= 14 Tio NVMe SSD
_Dernière mise à jour le 22 juin 2024_ | ✅ | +| Celo | [Guide Docker](https://docs.infradao.com/archive-nodes-101/celo/docker) | CPU 4 cœurs / 8 threads
Ubuntu 22.04
16Go+ RAM
>= 2 Tio NVMe SSD
_Dernière mise à jour en août 2023_ | ✅ | +| Ethereum | [Guide Docker](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Vitesse d'horloge supérieure par rapport au nombre de cœurs
Ubuntu 22.04
16 Go+ RAM
>=3 To (NVMe recommandé)
_dernière mise à jour août 2023_ | ✅ | +| Fantom | [Guide Docker](https://docs.infradao.com/archive-nodes-101/fantom/docker) | CPU 4 cœurs / 8 threads
Ubuntu 22.04
16 Go + RAM
>= 13 Tio SSD NVMe
_dernière mise à jour août 2023_ | ✅ | +| Gnosis | [Guide Baremetal](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | CPU 6 cœurs / 12 threads
Ubuntu 22.04
16 Go+ RAM
>= 3 To SSD NVMe
_dernière mise à jour août 2023_ | ✅ | +| Linea | [Guide Baremetal ](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | CPU 4+ cœurs
Ubuntu 22.04
16 Go+ RAM
>= 1 To SSD NVMe
_dernière mise à jour le 2 avril 2024_ | ✅ | +| Optimism | [Guide Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[Guide GETH Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[Guide GETH Docker](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | CPU 4 cœurs / 8 threads
Ubuntu 22.04
16 Go + RAM
>= SSD NVMe 8 Tio
_dernière mise à jour août 2023_ | ✅ | +| Polygon | [Guide Docker](https://docs.infradao.com/archive-nodes-101/polygon/docker) | CPU 16 cœurs
Ubuntu 22.04
32 Go+ RAM
>= 10 Tio NVMe SSD
_dernière mise à jour août 2023_ | ✅ | +| Scroll | [Guide Baremetal](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Guide Docker](https://docs.infradao.com/archive-nodes-101/scroll/docker) | CPU 4 cœurs / 8 threads
Debian 12
16 Go + RAM
>= 1 Tio NVMe SSD
_dernière mise à jour le 3 avril 2024_ | ✅ | From da85d6daf1179caa7afe70b3fc4821ca57c78cbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:27 -0500 Subject: [PATCH 0407/1789] New translations supported-network-requirements.mdx (Spanish) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/es/indexing/supported-network-requirements.mdx b/website/src/pages/es/indexing/supported-network-requirements.mdx index dfebec344880..c06461d981d6 100644 --- a/website/src/pages/es/indexing/supported-network-requirements.mdx +++ b/website/src/pages/es/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Red | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | +| Red | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVMe preferred)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | From 7b9da99ed30a500ee71d6d58bca72095703ed0af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:28 -0500 Subject: [PATCH 0408/1789] New translations supported-network-requirements.mdx (Arabic) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/ar/indexing/supported-network-requirements.mdx b/website/src/pages/ar/indexing/supported-network-requirements.mdx index 9c820d055399..811fb2a8cec7 100644 --- a/website/src/pages/ar/indexing/supported-network-requirements.mdx +++ b/website/src/pages/ar/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Network | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| بوليجون | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | +| Network | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVMe preferred)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| بوليجون | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | From 863203c5a3c5d6b577256d79947e3053a4995b29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:29 -0500 Subject: [PATCH 0409/1789] New translations supported-network-requirements.mdx (Czech) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/cs/indexing/supported-network-requirements.mdx b/website/src/pages/cs/indexing/supported-network-requirements.mdx index a81118cec231..efbee1c17750 100644 --- a/website/src/pages/cs/indexing/supported-network-requirements.mdx +++ b/website/src/pages/cs/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Síť | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | +| Síť | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVMe preferred)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | From a02933a1fbdfba7d0c0c3614a6527089c03f1624 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:30 -0500 Subject: [PATCH 0410/1789] New translations supported-network-requirements.mdx (German) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/de/indexing/supported-network-requirements.mdx b/website/src/pages/de/indexing/supported-network-requirements.mdx index 72e36248f68c..7bbfce189885 100644 --- a/website/src/pages/de/indexing/supported-network-requirements.mdx +++ b/website/src/pages/de/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Unterstützte Netzwerkanforderungen --- -| Netzwerk | Guides | Systemanforderungen | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Höhere Taktfrequenz im Vergleich zur Kernanzahl
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | +| Netzwerk | Guides | Systemanforderungen | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVMe preferred)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Höhere Taktfrequenz im Vergleich zur Kernanzahl
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | From 4465ae2eddea8ec09394195b8be47712706f144e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:31 -0500 Subject: [PATCH 0411/1789] New translations supported-network-requirements.mdx (Italian) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/it/indexing/supported-network-requirements.mdx b/website/src/pages/it/indexing/supported-network-requirements.mdx index 7eed955d1013..88be77e74cc8 100644 --- a/website/src/pages/it/indexing/supported-network-requirements.mdx +++ b/website/src/pages/it/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| La rete | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | +| La rete | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVMe preferred)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | From 57491e14fc2bf1989401eee5cf980db15bd6926c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:32 -0500 Subject: [PATCH 0412/1789] New translations supported-network-requirements.mdx (Japanese) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/ja/indexing/supported-network-requirements.mdx b/website/src/pages/ja/indexing/supported-network-requirements.mdx index 6aa0c0caa16f..99ceda419e06 100644 --- a/website/src/pages/ja/indexing/supported-network-requirements.mdx +++ b/website/src/pages/ja/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| ネットワーク | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | +| ネットワーク | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVMe preferred)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | From acc08af4567918e49fe487ec53e9a6f6dcf534dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:33 -0500 Subject: [PATCH 0413/1789] New translations supported-network-requirements.mdx (Korean) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/ko/indexing/supported-network-requirements.mdx b/website/src/pages/ko/indexing/supported-network-requirements.mdx index df15ef48d762..afbf755c0a5a 100644 --- a/website/src/pages/ko/indexing/supported-network-requirements.mdx +++ b/website/src/pages/ko/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Network | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | +| Network | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVMe preferred)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | From af7dbd1c3d640e1b0ab638113ad6d4aca8e79994 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:34 -0500 Subject: [PATCH 0414/1789] New translations supported-network-requirements.mdx (Dutch) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/nl/indexing/supported-network-requirements.mdx b/website/src/pages/nl/indexing/supported-network-requirements.mdx index 9bfbc8d0fefd..4a5d2fa8f364 100644 --- a/website/src/pages/nl/indexing/supported-network-requirements.mdx +++ b/website/src/pages/nl/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Netwerk | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | +| Netwerk | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVMe preferred)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | From e49def7880a2a43dc732c71ed1410a9761d565f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:35 -0500 Subject: [PATCH 0415/1789] New translations supported-network-requirements.mdx (Polish) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/pl/indexing/supported-network-requirements.mdx b/website/src/pages/pl/indexing/supported-network-requirements.mdx index df15ef48d762..afbf755c0a5a 100644 --- a/website/src/pages/pl/indexing/supported-network-requirements.mdx +++ b/website/src/pages/pl/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Network | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | +| Network | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ |

From 28aaf3e32db616f1d3f7b724ee0854791ab86735 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Tue, 25 Feb 2025 17:15:36 -0500
Subject: [PATCH 0416/1789] New translations supported-network-requirements.mdx (Portuguese)

---
 .../supported-network-requirements.mdx | 28 +++++++++----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/website/src/pages/pt/indexing/supported-network-requirements.mdx b/website/src/pages/pt/indexing/supported-network-requirements.mdx
index d678f0534f01..c1bd4433f1d7 100644
--- a/website/src/pages/pt/indexing/supported-network-requirements.mdx
+++ b/website/src/pages/pt/indexing/supported-network-requirements.mdx
@@ -2,17 +2,17 @@
 title: Requisitos de Redes Apoiadas
 ---
 
-| Rede | Guias | Requisitos de sistema | Recompensas de Indexação |
-| --- | --- | --- | :-: |
-| Arbitrum | [Guia Baremetal](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Guia Docker](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | CPU de 4+ núcleos
Ubuntu 22.04
16GB+ RAM
>= SSD NVMe com mais de 8 TiB
_última atualização em agosto de 2023_ | ✅ | -| Avalanche | [Guia Docker](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | CPU de 4 núcleos e 8 threads
Ubuntu 22.04
16GB+ RAM
SSD NVMe com mais de 5 TiB
_última atualização em agosto de 2023_ | ✅ | -| Base | [Guia Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[Guia GETH Baremetal](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[Guia GETH Docker](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | CPU de 8+ núcleos
Debian 12/Ubuntu 22.04
16 GB RAM
mais 4.5TB (NVMe preferido)
_última atualização em 14 de maio de 2024_ | ✅ | -| Binance | [Guia Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | CPU de 8 núcleos e 16 threads
Ubuntu 22.04
16GB+ RAM
NVMe SSD com mais de 14 TiB
_última atualização em 22 de junho de 2024_ | ✅ | -| Celo | [Guia Docker](https://docs.infradao.com/archive-nodes-101/celo/docker) | CPU de 4 núcleos e 8 threads
Ubuntu 22.04
16GB+ RAM
>= SSD NVMe com mais de 2 TiB
_última atualização em agosto de 2023_ | ✅ | -| Ethereum | [Guia Docker](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Frequência de clock maior que número de núcleos
Ubuntu 22.04
16GB+ RAM
Mais de 3TB (NVMe recomendado)
_última atualização em agosto de 2023_ | ✅ | -| Fantom | [Guia Docker](https://docs.infradao.com/archive-nodes-101/fantom/docker) | CPU de 4 núcleos e 8 threads
Ubuntu 22.04
16GB+ RAM
SSD NVMe com mais de 13 TiB
_última atualização em agosto de 2023_ | ✅ | -| Gnosis | [Guia Baremetal](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | CPU de 6 núcleos e 12 threads
Ubuntu 22.04
16GB+ RAM
NVMe SSD com mais de 3 TiB
_última atualização em agosto de 2023_ | ✅ | -| Linea | [Guia Baremetal](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | CPU de 4+ núcleos
Ubuntu 22.04
16GB+ RAM
>= SSD NVMe com mais de 1 TiB
_última atualização em 2 de abril de 2024_ | ✅ | -| Optimism | [Guia Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[Guia GETH Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[Guia GETH Docker](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | CPU de 4 núcleos e 8 threads
Ubuntu 22.04
16GB+ RAM
SSD NVMe com mais de 8 TiB
_última atualização em agosto de 2023_ | ✅ | -| Polygon | [Guia Docker](https://docs.infradao.com/archive-nodes-101/polygon/docker) | CPU de 16 núcleos
Ubuntu 22.04
32GB+ RAM
>= SSD NVMe com mais de 10 TiB
_última atualização em agosto de 2023_ | ✅ | -| Scroll | [Guia Baremetal](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Guia Docker](https://docs.infradao.com/archive-nodes-101/scroll/docker) | CPU de 4 núcleos e 8 threads
Debian 12
16GB+ RAM
SSD NVMe com mais de 1 TiB
_última atualização em 3 de abril de 2024_ | ✅ | +| Rede | Guias | Requisitos de sistema | Recompensas de Indexação | +| --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :----------------------: | +| Arbitrum | [Guia Baremetal](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Guia Docker](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | CPU de 4+ núcleos
Ubuntu 22.04
16GB+ RAM
>= SSD NVMe com mais de 8 TiB
_última atualização em agosto de 2023_ | ✅ | +| Avalanche | [Guia Docker](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | CPU de 4 núcleos e 8 threads
Ubuntu 22.04
16GB+ RAM
SSD NVMe com mais de 5 TiB
_última atualização em agosto de 2023_ | ✅ | +| Base | [Guia Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[Guia GETH Baremetal](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[Guia GETH Docker](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | CPU de 8+ núcleos
Debian 12/Ubuntu 22.04
16 GB RAM
mais 4.5TB (NVMe preferido)
_última atualização em 14 de maio de 2024_ | ✅ | +| Binance | [Guia Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | CPU de 8 núcleos e 16 threads
Ubuntu 22.04
16GB+ RAM
NVMe SSD com mais de 14 TiB
_última atualização em 22 de junho de 2024_ | ✅ | +| Celo | [Guia Docker](https://docs.infradao.com/archive-nodes-101/celo/docker) | CPU de 4 núcleos e 8 threads
Ubuntu 22.04
16GB+ RAM
>= SSD NVMe com mais de 2 TiB
_última atualização em agosto de 2023_ | ✅ | +| Ethereum | [Guia Docker](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Frequência de clock maior que número de núcleos
Ubuntu 22.04
16GB+ RAM
Mais de 3TB (NVMe recomendado)
_última atualização em agosto de 2023_ | ✅ | +| Fantom | [Guia Docker](https://docs.infradao.com/archive-nodes-101/fantom/docker) | CPU de 4 núcleos e 8 threads
Ubuntu 22.04
16GB+ RAM
SSD NVMe com mais de 13 TiB
_última atualização em agosto de 2023_ | ✅ | +| Gnosis | [Guia Baremetal](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | CPU de 6 núcleos e 12 threads
Ubuntu 22.04
16GB+ RAM
NVMe SSD com mais de 3 TiB
_última atualização em agosto de 2023_ | ✅ | +| Linea | [Guia Baremetal](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | CPU de 4+ núcleos
Ubuntu 22.04
16GB+ RAM
>= SSD NVMe com mais de 1 TiB
_última atualização em 2 de abril de 2024_ | ✅ | +| Optimism | [Guia Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[Guia GETH Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[Guia GETH Docker](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | CPU de 4 núcleos e 8 threads
Ubuntu 22.04
16GB+ RAM
SSD NVMe com mais de 8 TiB
_última atualização em agosto de 2023_ | ✅ | +| Polygon | [Guia Docker](https://docs.infradao.com/archive-nodes-101/polygon/docker) | CPU de 16 núcleos
Ubuntu 22.04
32GB+ RAM
>= SSD NVMe com mais de 10 TiB
_última atualização em agosto de 2023_ | ✅ | +| Scroll | [Guia Baremetal](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Guia Docker](https://docs.infradao.com/archive-nodes-101/scroll/docker) | CPU de 4 núcleos e 8 threads
Debian 12
16GB+ RAM
SSD NVMe com mais de 1 TiB
_última atualização em 3 de abril de 2024_ | ✅ |

From 03fc7254b1bbf1f78c6de786a6c0140eb2d806c0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Tue, 25 Feb 2025 17:15:37 -0500
Subject: [PATCH 0417/1789] New translations supported-network-requirements.mdx (Russian)

---
 .../supported-network-requirements.mdx | 28 +++++++++----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/website/src/pages/ru/indexing/supported-network-requirements.mdx b/website/src/pages/ru/indexing/supported-network-requirements.mdx
index f1afe7cb7850..719759b1c8f1 100644
--- a/website/src/pages/ru/indexing/supported-network-requirements.mdx
+++ b/website/src/pages/ru/indexing/supported-network-requirements.mdx
@@ -2,17 +2,17 @@
 title: Требования к поддерживаемым сетям
 ---
 
-| Сеть | Гайды | Системные требования | Награды за индексирование |
-| --- | --- | --- | :-: |
-| Арбитрум | [Гайд по Baremetal](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Гайд по Docker](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ ядраа CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_последнее обновление в августе 2023_ | ✅ | -| Avalanche | [Гайд по Docker](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 ядра / 8 потоков CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_последнее обновление в августе 2023_ | ✅ | -| Base | [Гайд по Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[Гайд по GETH Baremetal](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[Гайд по GETH Docker](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ ядер CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_последнее обновление 14 мая 2024_ | ✅ | -| Binance | [Гайд по Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 ядер / 16 потоков CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_последнее обновление 22 июня 2024_ | ✅ | -| Celo | [Гайд по Docker](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_последнее обновление в августе 2023_ | ✅ | -| Ethereum | [Гайд по Docker](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Более высокая тактовая частота по сравнению с количеством ядер
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_последнее обновление в августе 2023_ | ✅ | -| Fantom | [Гайд по Docker](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 ядра / 8 потоков CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_последнее обновление в августе 2023_ | ✅ | -| Gnosis | [Гайд по Baremetal](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 ядер / 12 потоков CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_последнее обновление в августе 2023_ | ✅ | -| Linea | [Гайд по Baremetal](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ ядра CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_последнее обновление 2 апреля 2024_ | ✅ | -| Optimism | [Гайд по Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[Гайд по GETH Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[Гайд по GETH Docker](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 ядра / 8 потоков CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_последнее обновление в августе 2023_ | ✅ | -| Polygon | [Гайд по Docker](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 ядра CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_последнее обновление в августе 2023_ | ✅ | -| Scroll | [Гайд по Baremetal](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Гайд по Docker](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 ядра / 8 потоков CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_последнее обновление 3 апреля 2024_ | ✅ | +| Сеть | Гайды | Системные требования | Награды за индексирование | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :-----------------------: | +| Арбитрум | [Гайд по Baremetal](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Гайд по Docker](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ ядраа CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_последнее обновление в августе 2023_ | ✅ | +| Avalanche | [Гайд по Docker](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 ядра / 8 потоков CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_последнее обновление в августе 2023_ | ✅ | +| Base | [Гайд по Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[Гайд по GETH Baremetal](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[Гайд по GETH Docker](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ ядер CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_последнее обновление 14 мая 2024_ | ✅ | +| Binance | [Гайд по Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 ядер / 16 потоков CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_последнее обновление 22 июня 2024_ | ✅ | +| Celo | [Гайд по Docker](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_последнее обновление в августе 2023_ | ✅ | +| Ethereum | [Гайд по Docker](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Более высокая тактовая частота по сравнению с количеством ядер
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_последнее обновление в августе 2023_ | ✅ | +| Fantom | [Гайд по Docker](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 ядра / 8 потоков CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_последнее обновление в августе 2023_ | ✅ | +| Gnosis | [Гайд по Baremetal](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 ядер / 12 потоков CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_последнее обновление в августе 2023_ | ✅ | +| Linea | [Гайд по Baremetal](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ ядра CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_последнее обновление 2 апреля 2024_ | ✅ | +| Optimism | [Гайд по Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[Гайд по GETH Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[Гайд по GETH Docker](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 ядра / 8 потоков CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_последнее обновление в августе 2023_ | ✅ | +| Polygon | [Гайд по Docker](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 ядра CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_последнее обновление в августе 2023_ | ✅ | +| Scroll | [Гайд по Baremetal](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Гайд по Docker](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 ядра / 8 потоков CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_последнее обновление 3 апреля 2024_ | ✅ |

From 528936bd2904c7c64e379a25ab771e87454c8647 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Tue, 25 Feb 2025 17:15:38 -0500
Subject: [PATCH 0418/1789] New translations supported-network-requirements.mdx (Swedish)

---
 .../supported-network-requirements.mdx | 28 +++++++++----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/website/src/pages/sv/indexing/supported-network-requirements.mdx b/website/src/pages/sv/indexing/supported-network-requirements.mdx
index f7a4943afd1b..f6c91108bac9 100644
--- a/website/src/pages/sv/indexing/supported-network-requirements.mdx
+++ b/website/src/pages/sv/indexing/supported-network-requirements.mdx
@@ -2,17 +2,17 @@
 title: Supported Network Requirements
 ---
 
-| Nätverk | Guides | System Requirements | Indexing Rewards |
-| --- | --- | --- | :-: |
-| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | +| Nätverk | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ |

From 8173d5a073daeb16a216d84a84218262691d066b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Tue, 25 Feb 2025 17:15:39 -0500
Subject: [PATCH 0419/1789] New translations supported-network-requirements.mdx (Turkish)

---
 .../supported-network-requirements.mdx | 28 +++++++++----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/website/src/pages/tr/indexing/supported-network-requirements.mdx b/website/src/pages/tr/indexing/supported-network-requirements.mdx
index a106094cac7c..85eaad3b00c4 100644
--- a/website/src/pages/tr/indexing/supported-network-requirements.mdx
+++ b/website/src/pages/tr/indexing/supported-network-requirements.mdx
@@ -2,17 +2,17 @@
 title: Desteklenen Ağ Gereksinimleri
 ---
 
-| Ağ | Rehberler | Sistem Gereksinimleri | Endeksleme Ödülleri |
-| --- | --- | --- | :-: |
-| Arbitrum | [Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Rehberi](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ çekirdekli CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_son güncelleme Ağustos 2023_ | ✅ | -| Avalanche | [Docker Rehberi](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 çekirdekli / 8 iş parçacıklı CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_son güncelleme Ağustos 2023_ | ✅ | -| Base | [Erigon Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Rehberi](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ çekirdekli CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME tercih edilir)
_son güncelleme 14 Mayıs 2024_ | ✅ | -| Binance | [Erigon Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 çekirdekli / 16 iş parçacıklı CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_son güncelleme 22 Haziran 2024_ | ✅ | -| Celo | [Docker Rehberi](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 çekirdekli / 8 iş parçacıklı CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_son güncelleme Ağustos 2023_ | ✅ | -| Ethereum | [Docker Rehberi](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Yüksek saat hızı, çekirdek sayısından daha önemlidir
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe önerilir)
_son güncelleme Ağustos 2023_ | ✅ | -| Fantom | [Docker Rehberi](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 çekirdekli / 8 iş parçacıklı CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_son güncelleme Ağustos 2023_ | ✅ | -| Gnosis | [Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 çekirdekli / 12 iş parçacıklı CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_son güncelleme Ağustos 2023_ | ✅ | -| Linea | [Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ çekirdekli CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_son güncelleme 2 Nisan 2024_ | ✅ | -| Optimism | [Erigon Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Rehberi](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 çekirdekli / 8 iş parçacıklı CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_son güncelleme Ağustos 2023_ | ✅ | -| Polygon | [Docker Rehberi](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 çekirdekli CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_son güncelleme Ağustos 2023_ | ✅ | -| Scroll | [Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Rehberi](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 çekirdekli / 8 iş parçacıklı CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_son güncelleme 3 Nisan 2024_ | ✅ | +| Ağ | Rehberler | Sistem Gereksinimleri | Endeksleme Ödülleri | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- | :-----------------: | +| Arbitrum | [Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Rehberi](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ çekirdekli CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_son güncelleme Ağustos 2023_ | ✅ | +| Avalanche | [Docker Rehberi](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 çekirdekli / 8 iş parçacıklı CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_son güncelleme Ağustos 2023_ | ✅ | +| Base | [Erigon Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Rehberi](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ çekirdekli CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME tercih edilir)
_son güncelleme 14 Mayıs 2024_ | ✅ | +| Binance | [Erigon Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 çekirdekli / 16 iş parçacıklı CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_son güncelleme 22 Haziran 2024_ | ✅ | +| Celo | [Docker Rehberi](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 çekirdekli / 8 iş parçacıklı CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_son güncelleme Ağustos 2023_ | ✅ | +| Ethereum | [Docker Rehberi](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Yüksek saat hızı, çekirdek sayısından daha önemlidir
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe önerilir)
_son güncelleme Ağustos 2023_ | ✅ | +| Fantom | [Docker Rehberi](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 çekirdekli / 8 iş parçacıklı CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_son güncelleme Ağustos 2023_ | ✅ | +| Gnosis | [Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 çekirdekli / 12 iş parçacıklı CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_son güncelleme Ağustos 2023_ | ✅ | +| Linea | [Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ çekirdekli CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_son güncelleme 2 Nisan 2024_ | ✅ | +| Optimism | [Erigon Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Rehberi](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 çekirdekli / 8 iş parçacıklı CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_son güncelleme Ağustos 2023_ | ✅ | +| Polygon | [Docker Rehberi](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 çekirdekli CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_son güncelleme Ağustos 2023_ | ✅ | +| Scroll | [Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Rehberi](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 çekirdekli / 8 iş parçacıklı CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_son güncelleme 3 Nisan 2024_ | ✅ |

From ff0279da37ca8b050c2f38c6d3ad4237ab627505 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Tue, 25 Feb 2025 17:15:40 -0500
Subject: [PATCH 0420/1789] New translations supported-network-requirements.mdx (Ukrainian)

---
 .../supported-network-requirements.mdx | 28 +++++++++----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/website/src/pages/uk/indexing/supported-network-requirements.mdx b/website/src/pages/uk/indexing/supported-network-requirements.mdx
index df15ef48d762..afbf755c0a5a 100644
--- a/website/src/pages/uk/indexing/supported-network-requirements.mdx
+++ b/website/src/pages/uk/indexing/supported-network-requirements.mdx
@@ -2,17 +2,17 @@
 title: Supported Network Requirements
 ---
 
-| Network | Guides | System Requirements | Indexing Rewards |
-| --- | --- | --- | :-: |
-| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | +| Network | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ |

From 9ab444af77e27349a1d9847dfd2a3574800d9d95 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Tue, 25 Feb 2025 17:15:41 -0500
Subject: [PATCH 0421/1789] New translations supported-network-requirements.mdx (Chinese Simplified)

---
 .../supported-network-requirements.mdx | 28 +++++++++----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/website/src/pages/zh/indexing/supported-network-requirements.mdx b/website/src/pages/zh/indexing/supported-network-requirements.mdx
index 31ca8ba7ecf4..72c9bba12a39 100644
--- a/website/src/pages/zh/indexing/supported-network-requirements.mdx
+++ b/website/src/pages/zh/indexing/supported-network-requirements.mdx
@@ -2,17 +2,17 @@
 title: Supported Network Requirements
 ---
 
-| Network | Guides | System Requirements | Indexing Rewards |
-| --- | --- | --- | :-: |
-| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| 以太坊 | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | +| Network | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| 以太坊 | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ |

From c29a3e76914d9424e52f7d2a0f41ac1ef4d305c4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Tue, 25 Feb 2025 17:15:42 -0500
Subject: [PATCH 0422/1789] New translations supported-network-requirements.mdx (Urdu (Pakistan))

---
 .../supported-network-requirements.mdx | 28 +++++++++----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/website/src/pages/ur/indexing/supported-network-requirements.mdx b/website/src/pages/ur/indexing/supported-network-requirements.mdx
index f4b5a7768f13..ee345dee7c3f 100644
--- a/website/src/pages/ur/indexing/supported-network-requirements.mdx
+++ b/website/src/pages/ur/indexing/supported-network-requirements.mdx
@@ -2,17 +2,17 @@
 title: Supported Network Requirements
 ---
 
-| نیٹ ورک | Guides | System Requirements | Indexing Rewards |
-| --- | --- | --- | :-: |
-| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | +| نیٹ ورک | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVME preffered)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ |

From d41819aa29d76f59dca1531ccdbb4e485593048e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Tue, 25 Feb 2025 17:15:43 -0500
Subject: [PATCH 0423/1789] New translations supported-network-requirements.mdx (Vietnamese)

---
 .../supported-network-requirements.mdx | 28 +++++++++----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/website/src/pages/vi/indexing/supported-network-requirements.mdx b/website/src/pages/vi/indexing/supported-network-requirements.mdx
index 50cd5e88b459..a8305e895706 100644
--- a/website/src/pages/vi/indexing/supported-network-requirements.mdx
+++ b/website/src/pages/vi/indexing/supported-network-requirements.mdx
@@ -2,17 +2,17 @@
 title: Supported Network Requirements
 ---
 
-| Mạng lưới | Guides | System Requirements | Indexing Rewards |
-| --- | --- | --- | :-: |
-| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVMe preferred)
_last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | +| Mạng lưới | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVMe preferred)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | From de3d240ee09b93f470dfdfe79c040a23c6096bab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:44 -0500 Subject: [PATCH 0424/1789] New translations supported-network-requirements.mdx (Marathi) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/mr/indexing/supported-network-requirements.mdx b/website/src/pages/mr/indexing/supported-network-requirements.mdx index a1a9e0338649..8d20c31f3fc5 100644 --- a/website/src/pages/mr/indexing/supported-network-requirements.mdx +++ b/website/src/pages/mr/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Network | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| हिमस्खलन | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVMe preferred)
_last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| इथरियम | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | -| फॅन्टम | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | -| आशावाद | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| बहुभुज | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | +| Network | Guides | System Requirements | Indexing Rewards | +| -------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| हिमस्खलन | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVMe preferred)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| इथरियम | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| फॅन्टम | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| आशावाद | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| बहुभुज | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | From 729e4cf5100c7f0594ce45b69c6d0fb10cfab93b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:45 -0500 Subject: [PATCH 0425/1789] New translations supported-network-requirements.mdx (Hindi) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/hi/indexing/supported-network-requirements.mdx b/website/src/pages/hi/indexing/supported-network-requirements.mdx index 647eda3e6651..29673e7529a7 100644 --- a/website/src/pages/hi/indexing/supported-network-requirements.mdx +++ b/website/src/pages/hi/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| नेटवर्क | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVMe preferred)
_last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_अंतिम बार अपडेट किया गया 22 जून 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | +| नेटवर्क | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVMe preferred)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_अंतिम बार अपडेट किया गया 22 जून 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | From e5e870be95248a26ea6448cc109909f27127399b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:46 -0500 Subject: [PATCH 0426/1789] New translations supported-network-requirements.mdx (Swahili) --- .../supported-network-requirements.mdx | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 website/src/pages/sw/indexing/supported-network-requirements.mdx diff --git a/website/src/pages/sw/indexing/supported-network-requirements.mdx b/website/src/pages/sw/indexing/supported-network-requirements.mdx new file mode 100644 index 000000000000..afbf755c0a5a --- /dev/null +++ b/website/src/pages/sw/indexing/supported-network-requirements.mdx @@ -0,0 +1,18 @@ +--- +title: Supported Network Requirements +--- + +| Network | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 5 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
Debian 12/Ubuntu 22.04
16 GB RAM
>= 4.5TB (NVMe preferred)
_last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
Ubuntu 22.04
>=32 GB RAM
>= 14 TiB NVMe SSD
_last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 2 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
Ubuntu 22.04
16GB+ RAM
>=3TB (NVMe recommended)
_last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 13 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 3 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
Ubuntu 22.04
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

[GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
[GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
Ubuntu 22.04
16GB+ RAM
>= 8 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
Ubuntu 22.04
32GB+ RAM
>= 10 TiB NVMe SSD
_last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
[Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
Debian 12
16GB+ RAM
>= 1 TiB NVMe SSD
_last updated 3rd April 2024_ | ✅ | From d8eb89c3f43004298d4b3a8d226c1bdd677ee401 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:58 -0500 Subject: [PATCH 0427/1789] New translations firehose.mdx (Swahili) --- .../pages/sw/indexing/tooling/firehose.mdx | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 website/src/pages/sw/indexing/tooling/firehose.mdx diff --git a/website/src/pages/sw/indexing/tooling/firehose.mdx b/website/src/pages/sw/indexing/tooling/firehose.mdx new file mode 100644 index 000000000000..0f0fdebbafd0 --- /dev/null +++ b/website/src/pages/sw/indexing/tooling/firehose.mdx @@ -0,0 +1,24 @@ +--- +title: Firehose +--- + +![Firehose Logo](/img/firehose-logo.png) + +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. + +The Graph merges into Go Ethereum/geth with the adoption of [Live Tracer with v1.14.0 release](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0). + +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. + +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### Getting Started + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. From 40461dacd042b4443c33d8f15559b21dd21e0f40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:15:59 -0500 Subject: [PATCH 0428/1789] New translations graphcast.mdx (Romanian) --- website/src/pages/ro/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ro/indexing/tooling/graphcast.mdx b/website/src/pages/ro/indexing/tooling/graphcast.mdx index cac63bbd9340..461fe3852377 100644 --- a/website/src/pages/ro/indexing/tooling/graphcast.mdx +++ b/website/src/pages/ro/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. -- Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. 
-- Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. ### Află mai multe From 094318a111c31eb917940f9da1a0fa5089c56056 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:00 -0500 Subject: [PATCH 0429/1789] New translations graphcast.mdx (French) --- website/src/pages/fr/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/fr/indexing/tooling/graphcast.mdx b/website/src/pages/fr/indexing/tooling/graphcast.mdx index 5edccfb10588..e24e9904bdd8 100644 --- a/website/src/pages/fr/indexing/tooling/graphcast.mdx +++ b/website/src/pages/fr/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ Actuellement, le coût de diffusion d’informations vers d’autres participant Le SDK Graphcast (Software Development Kit) permet aux développeurs de créer des radios, qui sont des applications basées sur les potins que les indexeurs peuvent exécuter dans un but donné. Nous avons également l'intention de créer quelques radios (ou de fournir une assistance à d'autres développeurs/équipes qui souhaitent créer des radios) pour les cas d'utilisation suivants : -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- Réalisation d'enchères et coordination pour les subgraphs, les substreams, et les données Firehose de synchronisation de distorsion provenant d'autres indexeurs. -- Auto-rapport sur l'analyse des requêtes actives, y compris les volumes de requêtes de subgraphs, les volumes de frais, etc. -- Auto-rapport sur l'analyse de l'indexation, y compris le temps d'indexation des subgraphs, les coûts des gaz de traitement, les erreurs d'indexation rencontrées, etc. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Auto-déclaration sur les informations de la pile, y compris la version du graph-node, la version Postgres, la version du client Ethereum, etc. 
### En savoir plus From 13dfe387dc0aa936e0e9900724c52add72631ef6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:01 -0500 Subject: [PATCH 0430/1789] New translations graphcast.mdx (Spanish) --- website/src/pages/es/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/es/indexing/tooling/graphcast.mdx b/website/src/pages/es/indexing/tooling/graphcast.mdx index 3da74365af91..3fef530ae421 100644 --- a/website/src/pages/es/indexing/tooling/graphcast.mdx +++ b/website/src/pages/es/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ En la actualidad, el costo de transmitir información a otros participantes de l El Graphcast SDK (Kit de Desarrollo de Software) permite a los desarrolladores construir Radios, que son aplicaciones impulsadas por gossip que los Indexadores pueden utilizar con una finalidad específica. También queremos crear algunas Radios (o dar soporte a otros desarrolladores/equipos que deseen construir Radios) para los siguientes casos de uso: -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- Llevar a cabo subastas y coordinar warp-syncing de datos de subgrafos, substreams y Firehose de otros Indexadores. -- Autoinforme sobre análisis de consultas activas, incluidos volúmenes de consultas de subgrafos, volúmenes de tarifas, etc. -- Generar informes propios sobre análisis del proceso de indexación, que incluyan período de indexación de subgrafos, costos de gas handler, indexación de errores encontrados, etc. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Generar informes propios sobre información de stack que incluyan versión del graph-node, la versión de Postgres, la versión del cliente de Ethereum, etc. ### Aprende más From 7942344ac586362849dcd3701b9661873c62ca15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:02 -0500 Subject: [PATCH 0431/1789] New translations graphcast.mdx (Arabic) --- website/src/pages/ar/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ar/indexing/tooling/graphcast.mdx b/website/src/pages/ar/indexing/tooling/graphcast.mdx index 8fc00976ec28..d084edcd7067 100644 --- a/website/src/pages/ar/indexing/tooling/graphcast.mdx +++ b/website/src/pages/ar/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). 
-- Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. -- Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. -- Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. ### Learn More From 564e49b1ec68dff4e8701c9429856c466568e907 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:03 -0500 Subject: [PATCH 0432/1789] New translations graphcast.mdx (Czech) --- website/src/pages/cs/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/cs/indexing/tooling/graphcast.mdx b/website/src/pages/cs/indexing/tooling/graphcast.mdx index aec7d84070c3..5aa86adcc8da 100644 --- a/website/src/pages/cs/indexing/tooling/graphcast.mdx +++ b/website/src/pages/cs/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ V současné době jsou náklady na vysílání informací ostatním účastník Graphcast SDK (Vývoj softwaru Kit) umožňuje vývojářům vytvářet rádia, což jsou aplikace napájené drby, které mohou indexery spouštět k danému účelu. Máme také v úmyslu vytvořit několik Radios (nebo poskytnout podporu jiným vývojářům/týmům, které chtějí Radios vytvořit) pro následující případy použití: -- Křížová kontrola integrity dat subgrafu v reálném čase ([Podgraf Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- Provádění aukcí a koordinace pro warp synchronizaci podgrafů, substreamů a dat Firehose z jiných Indexerů. -- Vlastní hlášení o analýze aktivních dotazů, včetně objemů požadavků na dílčí grafy, objemů poplatků atd. -- Vlastní hlášení o analýze indexování, včetně času indexování podgrafů, nákladů na plyn obsluhy, zjištěných chyb indexování atd. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Vlastní hlášení informací o zásobníku včetně verze grafového uzlu, verze Postgres, verze klienta Ethereum atd. 
### Dozvědět se více From 26f7f7d229db1026dd8f2cc96335e620af82aa96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:04 -0500 Subject: [PATCH 0433/1789] New translations graphcast.mdx (German) --- website/src/pages/de/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/de/indexing/tooling/graphcast.mdx b/website/src/pages/de/indexing/tooling/graphcast.mdx index ed3f8db0e9d8..31b688913790 100644 --- a/website/src/pages/de/indexing/tooling/graphcast.mdx +++ b/website/src/pages/de/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ Derzeit werden die Kosten für die Übertragung von Informationen an andere Netz Das Graphcast SDK (Software Development Kit) ermöglicht es Entwicklern, Radios zu erstellen, d.h. klatschgesteuerte Anwendungen, die Indexer ausführen können, um einen bestimmten Zweck zu erfüllen. Wir beabsichtigen auch, einige Radios für die folgenden Anwendungsfälle zu erstellen (oder anderen Entwicklern/Teams, die Radios erstellen möchten, Unterstützung zu bieten): -- Echtzeit-Überprüfung der Integrität von Subgraph-Daten ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- Durchführung von Auktionen und Koordination für die Warp-Synchronisierung von Subgraphen, Substreams und Firehose-Daten von anderen Indexern. -- Selbstauskunft über die Analyse aktiver Abfragen, einschließlich Subgraph-Anfragevolumen, Gebührenvolumen usw. -- Selbstauskunft über die Indizierungsanalyse, einschließlich der Zeit für die Indizierung von Subgraphen, Gaskosten für die Bearbeitung, aufgetretene Indexierungsfehler usw. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Selbstauskunft über Stack-Informationen, einschließlich Graph-Node-Version, Postgres-Version, Ethereum-Client-Version, usw. ### Weitere Informationen From 8a0231f625c8c12de3ef0d14408724d1a7a45f1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:05 -0500 Subject: [PATCH 0434/1789] New translations graphcast.mdx (Italian) --- website/src/pages/it/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/it/indexing/tooling/graphcast.mdx b/website/src/pages/it/indexing/tooling/graphcast.mdx index 6d0cd00b7784..366d38044fd6 100644 --- a/website/src/pages/it/indexing/tooling/graphcast.mdx +++ b/website/src/pages/it/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ Attualmente, il costo per trasmettere informazioni ad altri partecipanti alla re L'SDK (Software Development Kit) di Graphcast consente agli sviluppatori di creare radio, che sono applicazioni alimentate da gossip che gli indexer possono eseguire per servire un determinato scopo. Intendiamo inoltre creare alcune radio (o fornire supporto ad altri sviluppatori/team che desiderano creare radio) per i seguenti casi d'uso: -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). 
-- Conduzione di aste e coordinamento per la sincronizzazione warp di subgraph, substream e dati Firehose da altri indexer. -- Autodichiarazione sulle analisi delle query attive, compresi i volumi delle richieste di subgraph, i volumi delle commissioni, ecc. -- Autodichiarazione sull'analisi dell'indicizzazione, compresi i tempi di indicizzazione dei subgraph, i costi del gas per i gestori, gli errori di indicizzazione riscontrati, ecc. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Autodichiarazione delle informazioni sullo stack, tra cui la versione del graph-node, la versione di Postgres, la versione del client Ethereum, ecc. ### Scopri di più From eeea252804a73a6788dbc07e62de03e3615f1a35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:06 -0500 Subject: [PATCH 0435/1789] New translations graphcast.mdx (Japanese) --- website/src/pages/ja/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ja/indexing/tooling/graphcast.mdx b/website/src/pages/ja/indexing/tooling/graphcast.mdx index b9d89010f922..0a1fe3e92964 100644 --- a/website/src/pages/ja/indexing/tooling/graphcast.mdx +++ b/website/src/pages/ja/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ title: グラフキャスト Graphcast SDK (ソフトウェア開発キット) を使用すると、開発者はラジオを構築できます。これは、インデクサーが特定の目的を果たすために実行できる、ゴシップを利用したアプリケーションです。また、次のユースケースのために、いくつかのラジオを作成する (または、ラジオを作成したい他の開発者/チームにサポートを提供する) 予定です: -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- サブグラフ、サブストリーム、および他のインデクサーからの Firehose データをワープ同期するためのオークションと調整の実施。 -- サブグラフのリクエスト量、料金の量などを含む、アクティブなクエリ分析に関する自己報告。 -- サブグラフのインデックス作成時間、ハンドラー ガスのコスト、発生したインデックス作成エラーなどを含む、インデックス作成分析に関する自己報告。 +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. 
- グラフノードのバージョン、Postgres のバージョン、Ethereum クライアントのバージョンなどを含むスタック情報の自己報告。 ### もっと詳しく知る From 0076d6dae8e27ff2f97b4bece3fdd244b30b4d08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:07 -0500 Subject: [PATCH 0436/1789] New translations graphcast.mdx (Korean) --- website/src/pages/ko/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ko/indexing/tooling/graphcast.mdx b/website/src/pages/ko/indexing/tooling/graphcast.mdx index 4072877a1257..d1795e9be577 100644 --- a/website/src/pages/ko/indexing/tooling/graphcast.mdx +++ b/website/src/pages/ko/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. -- Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. -- Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. ### Learn More From 43e83054171d8fa5d359429c532ebe2dfb0c401b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:07 -0500 Subject: [PATCH 0437/1789] New translations graphcast.mdx (Dutch) --- website/src/pages/nl/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/nl/indexing/tooling/graphcast.mdx b/website/src/pages/nl/indexing/tooling/graphcast.mdx index cbc12c17f95b..9a712c6dd64a 100644 --- a/website/src/pages/nl/indexing/tooling/graphcast.mdx +++ b/website/src/pages/nl/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. 
-- Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. -- Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. ### Leer Meer From 4cf689da078dea52d1078af2a78b735577e1adf7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:08 -0500 Subject: [PATCH 0438/1789] New translations graphcast.mdx (Polish) --- website/src/pages/pl/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/pl/indexing/tooling/graphcast.mdx b/website/src/pages/pl/indexing/tooling/graphcast.mdx index 18639dc9acc8..a790c5800c7e 100644 --- a/website/src/pages/pl/indexing/tooling/graphcast.mdx +++ b/website/src/pages/pl/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ Obecnie koszt przekazywania informacji innym uczestnikom sieci jest uzależniony SDK Graphcast (Software Development Kit) umożliwia programistom budowanie "Radios", czyli aplikacji opartych na przekazywaniu plotek, które indekserzy mogą uruchamiać w celu spełnienia określonego zadania. Planujemy również stworzyć kilka takich aplikacji Radios (lub udzielać wsparcia innym programistom/zespołom, które chcą w ich budowaniu uczestniczyć) dla następujących przypadków użycia: -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- Przeprowadzanie aukcji i koordynacja synchronizacji warp subgrafów, substreamów oraz danych Firehose od innych indekserów. -- Raportowanie na temat aktywnej analizy zapytań, w tym wolumenów zapytań do subgrafów, wolumenów opłat itp. -- Raportowanie na temat analizy indeksowania, w tym czasu indeksowania subgrafów, kosztów gazu dla osób obsługujących zapytanie, napotkanych błędów indeksowania itp. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Raportowanie informacji na temat stosu, w tym wersji graph-node, wersji Postgres oraz wersji klienta Ethereum itp. 
### Dowiedz się więcej From 6b3d24ee0f95ad95683097646bebfb67b82c6621 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:09 -0500 Subject: [PATCH 0439/1789] New translations graphcast.mdx (Portuguese) --- website/src/pages/pt/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/pt/indexing/tooling/graphcast.mdx b/website/src/pages/pt/indexing/tooling/graphcast.mdx index e57b6b206900..a6978669ba46 100644 --- a/website/src/pages/pt/indexing/tooling/graphcast.mdx +++ b/website/src/pages/pt/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ Atualmente, o custo de transmitir informações para outros participantes de red O SDK (Kit de Programação de Software) do Graphcast permite aos programadores construir Rádios, que são aplicativos movidos a mexericos, que os Indexers podem executar por um certo propósito. Nós também pretendemos criar alguns Rádios (ou oferecer apoio para outros programadores/outras equipas que desejam construir Rádios) para os seguintes casos de uso: -- Verificação em tempo real de integridade dos dados de um subgraph ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- Condução de leilões e coordenação para a sincronização de subgraphs, substreams e dados do Firehose de outros Indexers. -- Autorrelatos em analíticas ativas de queries, inclusive volumes de pedidos de subgraphs, volumes de taxas, etc. -- Autorrelatos em analíticas de indexação, como tempo de indexação de subgraphs, custos de gas de handlers, erros encontrados, etc. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Autorrelatos em informações de stack, incluindo versão do graph-node, versão do Postgres, versão do cliente Ethereum, etc. ### Aprenda Mais From d1242d2176d541b365190fa7c02eabb882f074c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:10 -0500 Subject: [PATCH 0440/1789] New translations graphcast.mdx (Russian) --- website/src/pages/ru/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ru/indexing/tooling/graphcast.mdx b/website/src/pages/ru/indexing/tooling/graphcast.mdx index a3c391cf3e4f..aca6be2fb5d1 100644 --- a/website/src/pages/ru/indexing/tooling/graphcast.mdx +++ b/website/src/pages/ru/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Перекрестная проверка целостности данных субграфа в режиме реального времени (Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. 
-- Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. -- Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. ### Узнать больше From 5a28697c7fa489602a93d9c23ca2e52399503369 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:11 -0500 Subject: [PATCH 0441/1789] New translations graphcast.mdx (Swedish) --- website/src/pages/sv/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/sv/indexing/tooling/graphcast.mdx b/website/src/pages/sv/indexing/tooling/graphcast.mdx index 213029e1836b..56b93af13fc2 100644 --- a/website/src/pages/sv/indexing/tooling/graphcast.mdx +++ b/website/src/pages/sv/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ För närvarande avgörs kostnaden för att sända information till andra nätve Graphcast SDK (Utrustning för programvaruutveckling) gör det möjligt för utvecklare att bygga Radios, vilka är applikationer som drivs av gossipeffekt och som indexare kan köra för att tjäna ett visst syfte. Vi avser också att skapa några Radios (eller ge stöd åt andra utvecklare/team som önskar bygga Radios) för följande användningsområden: -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- Genomföra auktioner och koordinering för warp-synkronisering av delgrafer, delströmmar och Firehose-data från andra indexare. -- Självrapportering om aktiv frågeanalys, inklusive delgrafförfrågningsvolym, avgiftsvolym etc. -- Självrapportering om indexeringanalys, inklusive tid för delgrafindexering, gasavgifter för handler, påträffade indexeringsfel etc. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Självrapportering om stackinformation inklusive graph-node-version, Postgres-version, Ethereum-klientversion etc. 
### Läs mer From b27d158ae4627a37975cf10ae4785942c181eae4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:12 -0500 Subject: [PATCH 0442/1789] New translations graphcast.mdx (Turkish) --- website/src/pages/tr/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/tr/indexing/tooling/graphcast.mdx b/website/src/pages/tr/indexing/tooling/graphcast.mdx index d0bce650e2ae..910a7911a774 100644 --- a/website/src/pages/tr/indexing/tooling/graphcast.mdx +++ b/website/src/pages/tr/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. -- Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. -- Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. ### Daha Fazla Bilgi Edin From 92d218b9f838bb60ed36f67cbe4ab86e351603ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:13 -0500 Subject: [PATCH 0443/1789] New translations graphcast.mdx (Ukrainian) --- website/src/pages/uk/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/uk/indexing/tooling/graphcast.mdx b/website/src/pages/uk/indexing/tooling/graphcast.mdx index 4072877a1257..d1795e9be577 100644 --- a/website/src/pages/uk/indexing/tooling/graphcast.mdx +++ b/website/src/pages/uk/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. -- Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. 
-- Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. ### Learn More From 60fdbf022ad1fbd26c9633f322386e1ae8e999d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:14 -0500 Subject: [PATCH 0444/1789] New translations graphcast.mdx (Chinese Simplified) --- website/src/pages/zh/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/zh/indexing/tooling/graphcast.mdx b/website/src/pages/zh/indexing/tooling/graphcast.mdx index 6e29da450727..1196099ae046 100644 --- a/website/src/pages/zh/indexing/tooling/graphcast.mdx +++ b/website/src/pages/zh/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ title: Graphcast Graphcast SDK(软件开发工具包)允许开发人员构建Radio,这是一种使用gossip协议的应用程序,索引人可以运行这些应用程序来服务于特定的目的。我们还打算为以下用例创建一些Radio(或为希望构建Radio的其他开发人员/团队提供支持): -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- \-对来自其他索引人的warp同步中的子图、子流和Firehose数据进行拍卖和协调。 -- \-主动查询分析的自我报告,包括子图请求量、费用量等。 -- \-索引分析的自我报告,包括子图索引时间、处理程序gas成本、遇到的索引错误等。 +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - \-自报栈信息,包括graph节点版本、Postgres版本、以太坊客户端版本等。 ### 了解更多 From 602cfd72b769e64029927c14870445a7c24eedbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:15 -0500 Subject: [PATCH 0445/1789] New translations graphcast.mdx (Urdu (Pakistan)) --- website/src/pages/ur/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ur/indexing/tooling/graphcast.mdx b/website/src/pages/ur/indexing/tooling/graphcast.mdx index 4a7b5e2c4cfd..280366873bb3 100644 --- a/website/src/pages/ur/indexing/tooling/graphcast.mdx +++ b/website/src/pages/ur/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ title: گراف کاسٹ گراف کاسٹ SDK (سافٹ ویئر ڈویلپمنٹ کٹ) ڈویلپرز کو ریڈیو بنانے کی اجازت دیتا ہے، جو گپ شپ سے چلنے والی ایپلیکیشنز ہیں جنہیں انڈیکسرز ایک مقررہ مقصد کی تکمیل کے لیے چلا سکتے ہیں۔ ہم مندرجہ ذیل استعمال کے معاملات کے لیے چند ریڈیوز بنانے کا ارادہ رکھتے ہیں (یا دیگر ڈویلپرز/ٹیموں کو مدد فراہم کرتے ہیں جو ریڈیو بنانا چاہتے ہیں): -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). 
-- دوسرے انڈیکسرز سے وارپ سنکنگ سب گرافس، سب اسٹریمز، اور فائر ہوز ڈیٹا کے لیے نیلامی اور کوآرڈینیشن کا انعقاد. -- فعال کیوری کے تجزیات پر خود رپورٹنگ، بشمول سب گراف کی درخواست والیوم، فیس والیوم وغیرہ. -- انڈیکسنگ کے تجزیات پر خود رپورٹنگ، بشمول سب گراف انڈیکسنگ کا وقت، ہینڈلر گیس کے اخراجات، انڈیکسنگ کی غلطیوں کا سامنا کرنا وغیرہ. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - اسٹیک کی معلومات پر خود رپورٹنگ بشمول گراف نوڈ ورژن، Postgres ورژن، ایتھیریم کلائنٹ ورژن، وغیرہ. ### مزید جانیے From 2027da65c74e70b78f0606f941aaee271a204b52 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:16 -0500 Subject: [PATCH 0446/1789] New translations graphcast.mdx (Vietnamese) --- website/src/pages/vi/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/vi/indexing/tooling/graphcast.mdx b/website/src/pages/vi/indexing/tooling/graphcast.mdx index 2c523a014098..2b541a818654 100644 --- a/website/src/pages/vi/indexing/tooling/graphcast.mdx +++ b/website/src/pages/vi/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. -- Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. -- Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. 
### Learn More From c6d463a6d40bd351fcf759997463879274f35452 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:17 -0500 Subject: [PATCH 0447/1789] New translations graphcast.mdx (Marathi) --- website/src/pages/mr/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/mr/indexing/tooling/graphcast.mdx b/website/src/pages/mr/indexing/tooling/graphcast.mdx index 46e7c77e864d..966849766b7a 100644 --- a/website/src/pages/mr/indexing/tooling/graphcast.mdx +++ b/website/src/pages/mr/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ Is there something you'd like to learn from or share with your fellow Indexers i ग्राफकास्ट SDK (सॉफ्टवेअर डेव्हलपमेंट किट) विकसकांना रेडिओ तयार करण्यास अनुमती देते, जे गॉसिप-शक्तीवर चालणारे अनुप्रयोग आहेत जे निर्देशांक दिलेल्या उद्देशासाठी चालवू शकतात. खालील वापराच्या प्रकरणांसाठी काही रेडिओ तयार करण्याचा आमचा मानस आहे (किंवा रेडिओ तयार करू इच्छिणाऱ्या इतर विकासकांना/संघांना समर्थन पुरवणे): -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. -- Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. -- Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. ### अधिक जाणून घ्या From a75666ff663f675fa4a5b2e9d56d23e0b411067d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:18 -0500 Subject: [PATCH 0448/1789] New translations graphcast.mdx (Hindi) --- website/src/pages/hi/indexing/tooling/graphcast.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/hi/indexing/tooling/graphcast.mdx b/website/src/pages/hi/indexing/tooling/graphcast.mdx index 216fc0a502c5..f4978a7b800d 100644 --- a/website/src/pages/hi/indexing/tooling/graphcast.mdx +++ b/website/src/pages/hi/indexing/tooling/graphcast.mdx @@ -10,10 +10,10 @@ title: Graphcast ग्राफकास्ट एसडीके (सॉफ्टवेयर डेवलपमेंट किट) डेवलपर्स को रेडियो बनाने की अनुमति देता है, जो गपशप-संचालित अनुप्रयोग हैं जो इंडेक्सर्स किसी दिए गए उद्देश्य को पूरा करने के लिए चला सकते हैं। हम निम्नलिखित उपयोग के मामलों के लिए कुछ रेडियो बनाने का भी इरादा रखते हैं (या अन्य डेवलपर्स/टीमों को सहायता प्रदान करते हैं जो रेडियो बनाना चाहते हैं): -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). 
-- अन्य इंडेक्सर्स से ताना सिंकिंग सबग्राफ, सबस्ट्रीम और फायरहोज डेटा के लिए नीलामी और समन्वय आयोजित करना। -- सक्रिय क्वेरी एनालिटिक्स पर स्व-रिपोर्टिंग, जिसमें सबग्राफ अनुरोध मात्रा, शुल्क मात्रा आदि शामिल हैं। -- इंडेक्सिंग एनालिटिक्स पर सेल्फ-रिपोर्टिंग, जिसमें सबग्राफ इंडेक्सिंग टाइम, हैंडलर गैस कॉस्ट, इंडेक्सिंग एरर, आदि शामिल हैं। +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. - ग्राफ-नोड संस्करण, पोस्टग्रेज संस्करण, एथेरियम क्लाइंट संस्करण, आदि सहित स्टैक जानकारी पर स्व-रिपोर्टिंग। ### और अधिक जानें From 6cc8f9000df993427ef74afecf0d56ad2090d1fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:19 -0500 Subject: [PATCH 0449/1789] New translations graphcast.mdx (Swahili) --- .../pages/sw/indexing/tooling/graphcast.mdx | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 website/src/pages/sw/indexing/tooling/graphcast.mdx diff --git a/website/src/pages/sw/indexing/tooling/graphcast.mdx b/website/src/pages/sw/indexing/tooling/graphcast.mdx new file mode 100644 index 000000000000..d1795e9be577 --- /dev/null +++ b/website/src/pages/sw/indexing/tooling/graphcast.mdx @@ -0,0 +1,21 @@ +--- +title: Graphcast +--- + +## Introduction + +Is there something you'd like to learn from or share with your fellow Indexers in an automated manner, but it's too much hassle or costs too much gas? + +Currently, the cost to broadcast information to other network participants is determined by gas fees on the Ethereum blockchain. Graphcast solves this problem by acting as an optional decentralized, distributed peer-to-peer (P2P) communication tool that allows Indexers across the network to exchange information in real time. The cost of exchanging P2P messages is near zero, with the tradeoff of no data integrity guarantees. Nevertheless, Graphcast aims to provide message validity guarantees (i.e. that the message is valid and signed by a known protocol participant) with an open design space of reputation models. + +The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: + +- Real-time cross-checking of Subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Conducting auctions and coordination for warp syncing Subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including Subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including Subgraph indexing time, handler gas costs, indexing errors encountered, etc. +- Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. 
+ +### Learn More + +If you would like to learn more about Graphcast, [check out the documentation here.](https://docs.graphops.xyz/graphcast/intro) From d35fcc4c919b4fe34a7aba43984789521269e44c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:20 -0500 Subject: [PATCH 0450/1789] New translations benefits.mdx (Romanian) --- website/src/pages/ro/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/ro/resources/benefits.mdx b/website/src/pages/ro/resources/benefits.mdx index 6e698c54af73..ebc1b62b67a3 100644 --- a/website/src/pages/ro/resources/benefits.mdx +++ b/website/src/pages/ro/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Cost Comparison | Self Hosted | Rețeaua The Graph | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | $0 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | -| Cost per query | $0 | $0 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | $0 | +| Cost Comparison | Self Hosted | Rețeaua The Graph | +| :--------------------------: | :-------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $0+ | $0 per month | +| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | +| Cost per query | $0 | $0 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $750+ per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Cost Comparison | Self Hosted | Rețeaua The Graph | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $120 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~3,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $120 | +| Cost Comparison | Self Hosted | Rețeaua The Graph | +| :--------------------------: | :----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $500 per month | $120 per month | +| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~3,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Engineering expense | $200 per hour | Included | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total 
Monthly Costs | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Cost Comparison | Self Hosted | Rețeaua The Graph | -| :-: | :-: | :-: | -| Monthly server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $1,200 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~30,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $11,000+ | $1,200 | +| Cost Comparison | Self Hosted | Rețeaua The Graph | +| :--------------------------: | :-----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $1100 per month, per node | $0 | +| Query costs | $4000 | $1,200 per month | +| Number of nodes needed | 10 | Not applicable | +| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~30,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $11,000+ | $1,200 | \*including costs for backup: $50-$100 per month @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -Curating signal on a subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a subgraph, and later withdrawn—with potential to earn returns in the process). +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## No Setup Costs & Greater Operational Efficiency @@ -89,4 +89,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). 
From d61f239b1bfd3e7b4cab2ba3dce062d0f838b1f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:21 -0500 Subject: [PATCH 0451/1789] New translations benefits.mdx (French) --- website/src/pages/fr/resources/benefits.mdx | 79 ++++++++++----------- 1 file changed, 39 insertions(+), 40 deletions(-) diff --git a/website/src/pages/fr/resources/benefits.mdx b/website/src/pages/fr/resources/benefits.mdx index b39ea9fa5ca4..77407f040adb 100644 --- a/website/src/pages/fr/resources/benefits.mdx +++ b/website/src/pages/fr/resources/benefits.mdx @@ -27,58 +27,57 @@ Les coûts d'interrogation peuvent varier ; le coût indiqué est la moyenne au ## Utilisateur à faible volume (moins de 100 000 requêtes par mois) -| Cost Comparison | Auto-hébergé | The Graph Network | -| :-: | :-: | :-: | -| Coût mensuel du serveur\* | 350 $ au mois | 0 $ | -| Frais de requête | - 0 $ | 0$ par mois | -| Temps d'ingénierie | 400 $ au mois | Aucun, intégré au réseau avec des indexeurs distribués à l'échelle mondiale | -| Requêtes au mois | Limité aux capacités infra | 100 000 (Plan Gratuit) | -| Tarif par requête | 0 $ | 0$ | -| Infrastructure | Centralisée | Décentralisée | -| La redondance géographique | 750$+ par nœud complémentaire | Compris | -| Temps de disponibilité | Variable | - 99.9% | -| Total des coûts mensuels | 750 $+ | 0 $ | +| Cost Comparison | Auto-hébergé | The Graph Network | +| :----------------------------: | :--------------------------------------: | :-------------------------------------------------------------------------: | +| Coût mensuel du serveur\* | 350 $ au mois | 0 $ | +| Frais de requête | - 0 $ | 0$ par mois | +| Temps d'ingénierie | 400 $ au mois | Aucun, intégré au réseau avec des indexeurs distribués à l'échelle mondiale | +| Requêtes au mois | Limité aux capacités infra | 100 000 (Plan Gratuit) | +| Tarif par requête | 0 $ | 0$ | +| Infrastructure | Centralisée | Décentralisée | +| La redondance géographique | 750$+ par nœud complémentaire | Compris | +| Temps de disponibilité | Variable | - 99.9% | +| Total des coûts mensuels | 750 $+ | 0 $ | ## Utilisateur à volume moyen (~3M requêtes par mois) -| Cost Comparison | Auto-hébergé | The Graph Network | -| :-: | :-: | :-: | -| Coût mensuel du serveur\* | 350 $ au mois | 0 $ | -| Frais de requête | 500 $ au mois | 120$ par mois | -| Temps d'ingénierie | 800 $ au mois | Aucun, intégré au réseau avec des indexeurs distribués à l'échelle mondiale | -| Requêtes au mois | Limité aux capacités infra | ~3,000,000 | -| Tarif par requête | 0 $ | $0.00004 | -| Infrastructure | Centralisée | Décentralisée | -| Frais d'ingénierie | 200 $ au mois | Compris | -| La redondance géographique | 1 200 $ coût total par nœud supplémentaire | Compris | -| Temps de disponibilité | Variable | - 99.9% | -| Total des coûts mensuels | 1 650 $+ | 120$ | +| Cost Comparison | Auto-hébergé | The Graph Network | +| :----------------------------: | :-----------------------------------------: | :-------------------------------------------------------------------------: | +| Coût mensuel du serveur\* | 350 $ au mois | 0 $ | +| Frais de requête | 500 $ au mois | 120$ par mois | +| Temps d'ingénierie | 800 $ au mois | Aucun, intégré au réseau avec des indexeurs distribués à l'échelle mondiale | +| Requêtes au mois | Limité aux capacités infra | ~3,000,000 | +| Tarif par requête | 0 $ | $0.00004 | +| Infrastructure | Centralisée | Décentralisée | +| Frais d'ingénierie | 200 $ au mois | Compris | +| La redondance géographique | 1 200 $ 
coût total par nœud supplémentaire | Compris | +| Temps de disponibilité | Variable | - 99.9% | +| Total des coûts mensuels | 1 650 $+ | 120$ | ## Utilisateur à volume élevé (~30M requêtes par mois) -| Cost Comparison | Auto-hébergé | The Graph Network | -| :-: | :-: | :-: | -| Coût mensuel du serveur\* | 1100 $ au mois, par nœud | 0 $ | -| Frais de requête | 4000 $ | 1 200 $ par mois | -| Nombre de nœuds obligatoires | 10 | Sans objet | -| Temps d'ingénierie | 6000 $ ou plus au mois | Aucun, intégré au réseau avec des indexeurs distribués à l'échelle mondiale | -| Requêtes au mois | Limité aux capacités infra | ~30,000,000 | -| Tarif par requête | 0 $ | $0.00004 | -| Infrastructure | Centralisée | Décentralisée | -| La redondance géographique | 1 200 $ coût total par nœud supplémentaire | Compris | -| Temps de disponibilité | Variable | - 99.9% | -| Total des coûts mensuels | 11 000 $+ | 1,200$ | +| Cost Comparison | Auto-hébergé | The Graph Network | +| :----------------------------: | :------------------------------------------: | :-------------------------------------------------------------------------: | +| Coût mensuel du serveur\* | 1100 $ au mois, par nœud | 0 $ | +| Frais de requête | 4000 $ | 1 200 $ par mois | +| Nombre de nœuds obligatoires | 10 | Sans objet | +| Temps d'ingénierie | 6000 $ ou plus au mois | Aucun, intégré au réseau avec des indexeurs distribués à l'échelle mondiale | +| Requêtes au mois | Limité aux capacités infra | ~30,000,000 | +| Tarif par requête | 0 $ | $0.00004 | +| Infrastructure | Centralisée | Décentralisée | +| La redondance géographique | 1 200 $ coût total par nœud supplémentaire | Compris | +| Temps de disponibilité | Variable | - 99.9% | +| Total des coûts mensuels | 11 000 $+ | 1,200$ | \*y compris les coûts de sauvegarde : $50-$ à 100 dollars au mois Temps d'ingénierie basé sur une hypothèse de 200 $ de l'heure -Reflète le coût pour le consommateur de données. Les frais de requête sont toujours payés aux Indexeurs pour -les requêtes du Plan Gratuit. +Reflète le coût pour le consommateur de données. Les frais de requête sont toujours payés aux Indexeurs pour les requêtes du Plan Gratuit. -Les coûts estimés concernent uniquement les subgraphs sur le Mainnet d'Ethereum — les coûts sont encore plus élevés lorsqu’un `graph-node` est auto-hébergé sur d’autres réseaux. Certains utilisateurs peuvent avoir besoin de mettre à jour leur subgraph vers une nouvelle version. En raison des frais de gas sur Ethereum, une mise à jour coûte environ 50 $ au moment de la rédaction. Notez que les frais de gas sur [Arbitrum](/archived/arbitrum/arbitrum-faq/) sont nettement inférieurs à ceux du Mainnet d'Ethereum. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -Émettre un signal sur un subgraph est un cout net, nul optionnel et unique (par exemple, 1 000 $ de signal peuvent être conservés sur un subgraph, puis retirés - avec la possibilité de gagner des revenus au cours du processus). +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). 
## Pas de Coûts d’Installation & Plus grande Efficacité Opérationnelle @@ -90,4 +89,4 @@ Le réseau décentralisé de The Graph offre aux utilisateurs une redondance gé En résumé : The Graph Network est moins cher, plus facile à utiliser et produit des résultats supérieurs à ceux obtenus par l'exécution locale d'un `graph-node`. -Commencez à utiliser The Graph Network dès aujourd’hui et découvrez comment [publier votre subgraph sur le réseau décentralisé de The Graph](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From 0931515fc039d6d52379bab877f1e83da4ba3fbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:23 -0500 Subject: [PATCH 0452/1789] New translations benefits.mdx (Spanish) --- website/src/pages/es/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/es/resources/benefits.mdx b/website/src/pages/es/resources/benefits.mdx index e50969112dde..509c70f8a198 100644 --- a/website/src/pages/es/resources/benefits.mdx +++ b/website/src/pages/es/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Comparación de costos | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Costo mensual del servidor\* | $350 por mes | $0 | -| Costos de consulta | $0+ | $0 per month | -| Tiempo de ingeniería | $400 por mes | Ninguno, integrado en la red con Indexadores distribuidos globalmente | -| Consultas por mes | Limitado a capacidades de infraestructura | 100,000 (Free Plan) | -| Costo por consulta | $0 | $0 | -| Infrastructure | Centralizado | Descentralizado | -| Redundancia geográfica | $750+ por nodo adicional | Incluido | -| Tiempo de actividad | Varía | 99.9%+ | -| Costos mensuales totales | $750+ | $0 | +| Comparación de costos | Self Hosted | The Graph Network | +| :------------------------------: | :---------------------------------------: | :-------------------------------------------------------------------: | +| Costo mensual del servidor\* | $350 por mes | $0 | +| Costos de consulta | $0+ | $0 per month | +| Tiempo de ingeniería | $400 por mes | Ninguno, integrado en la red con Indexadores distribuidos globalmente | +| Consultas por mes | Limitado a capacidades de infraestructura | 100,000 (Free Plan) | +| Costo por consulta | $0 | $0 | +| Infrastructure | Centralizado | Descentralizado | +| Redundancia geográfica | $750+ por nodo adicional | Incluido | +| Tiempo de actividad | Varía | 99.9%+ | +| Costos mensuales totales | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Comparación de costos | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Costo mensual del servidor\* | $350 por mes | $0 | -| Costos de consulta | $500 por mes | $120 per month | -| Tiempo de ingeniería | $800 por mes | Ninguno, integrado en la red con Indexadores distribuidos globalmente | -| Consultas por mes | Limitado a capacidades de infraestructura | ~3,000,000 | -| Costo por consulta | $0 | $0.00004 | -| Infrastructure | Centralizado | Descentralizado | -| Gastos de ingeniería | $200 por hora | Incluido | -| Redundancia geográfica | $1,200 en costos totales por nodo adicional | Incluido | -| Tiempo de actividad | Varía | 99.9%+ | -| Costos mensuales totales | $1,650+ | $120 | +| Comparación de costos | Self Hosted | The 
Graph Network | +| :------------------------------: | :-----------------------------------------: | :-------------------------------------------------------------------: | +| Costo mensual del servidor\* | $350 por mes | $0 | +| Costos de consulta | $500 por mes | $120 per month | +| Tiempo de ingeniería | $800 por mes | Ninguno, integrado en la red con Indexadores distribuidos globalmente | +| Consultas por mes | Limitado a capacidades de infraestructura | ~3,000,000 | +| Costo por consulta | $0 | $0.00004 | +| Infrastructure | Centralizado | Descentralizado | +| Gastos de ingeniería | $200 por hora | Incluido | +| Redundancia geográfica | $1,200 en costos totales por nodo adicional | Incluido | +| Tiempo de actividad | Varía | 99.9%+ | +| Costos mensuales totales | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Comparación de costos | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Costo mensual del servidor\* | $1100 por mes, por nodo | $0 | -| Costos de consulta | $4000 | $1,200 per month | -| Número de nodos necesarios | 10 | No aplica | -| Tiempo de ingeniería | $6,000 o más por mes | Ninguno, integrado en la red con Indexadores distribuidos globalmente | -| Consultas por mes | Limitado a capacidades de infraestructura | ~30,000,000 | -| Costo por consulta | $0 | $0.00004 | -| Infrastructure | Centralizado | Descentralizado | -| Redundancia geográfica | $1,200 en costos totales por nodo adicional | Incluido | -| Tiempo de actividad | Varía | 99.9%+ | -| Costos mensuales totales | $11,000+ | $1,200 | +| Comparación de costos | Self Hosted | The Graph Network | +| :------------------------------: | :-----------------------------------------: | :-------------------------------------------------------------------: | +| Costo mensual del servidor\* | $1100 por mes, por nodo | $0 | +| Costos de consulta | $4000 | $1,200 per month | +| Número de nodos necesarios | 10 | No aplica | +| Tiempo de ingeniería | $6,000 o más por mes | Ninguno, integrado en la red con Indexadores distribuidos globalmente | +| Consultas por mes | Limitado a capacidades de infraestructura | ~30,000,000 | +| Costo por consulta | $0 | $0.00004 | +| Infrastructure | Centralizado | Descentralizado | +| Redundancia geográfica | $1,200 en costos totales por nodo adicional | Incluido | +| Tiempo de actividad | Varía | 99.9%+ | +| Costos mensuales totales | $11,000+ | $1,200 | \*incluidos los costos de copia de seguridad: $50-$100 por mes @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. 
-La señal de curación en un subgrafo es una acción opcional de única vez y no tiene costo neto (por ejemplo, se pueden curar $1k en señales en un subgrafo y luego retirarlas, con el potencial de obtener retornos en el proceso). +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## No Setup Costs & Greater Operational Efficiency @@ -89,4 +89,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From 013d0b01f971174b0122e577fc80e6b61551b09e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:24 -0500 Subject: [PATCH 0453/1789] New translations benefits.mdx (Arabic) --- website/src/pages/ar/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/ar/resources/benefits.mdx b/website/src/pages/ar/resources/benefits.mdx index 2e1a0834591c..6899e348a912 100644 --- a/website/src/pages/ar/resources/benefits.mdx +++ b/website/src/pages/ar/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Cost Comparison | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | $0 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | -| Cost per query | $0 | $0 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | $0 | +| Cost Comparison | Self Hosted | The Graph Network | +| :--------------------------: | :-------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $0+ | $0 per month | +| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | +| Cost per query | $0 | $0 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $750+ per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Cost Comparison | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $120 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~3,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per 
additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $120 | +| Cost Comparison | Self Hosted | The Graph Network | +| :--------------------------: | :----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $500 per month | $120 per month | +| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~3,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Engineering expense | $200 per hour | Included | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Cost Comparison | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Monthly server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $1,200 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~30,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $11,000+ | $1,200 | +| Cost Comparison | Self Hosted | The Graph Network | +| :--------------------------: | :-----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $1100 per month, per node | $0 | +| Query costs | $4000 | $1,200 per month | +| Number of nodes needed | 10 | Not applicable | +| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~30,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $11,000+ | $1,200 | \*including costs for backup: $50-$100 per month @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -Curating signal on a subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a subgraph, and later withdrawn—with potential to earn returns in the process). 
+Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## No Setup Costs & Greater Operational Efficiency @@ -89,4 +89,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From 867e9fea3cbd4d01c81fcd077a23b801a87e3e89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:25 -0500 Subject: [PATCH 0454/1789] New translations benefits.mdx (Czech) --- website/src/pages/cs/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/cs/resources/benefits.mdx b/website/src/pages/cs/resources/benefits.mdx index e18158242265..c0c0031d3f7b 100644 --- a/website/src/pages/cs/resources/benefits.mdx +++ b/website/src/pages/cs/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Srovnání nákladů | Vlastní hostitel | The Graph Network | -| :-: | :-: | :-: | -| Měsíční náklady na server\* | $350 měsíčně | $0 | -| Náklady na dotazování | $0+ | $0 per month | -| Inženýrský čas | $400 měsíčně | Žádné, zabudované do sítě s globálně distribuovanými indexery | -| Dotazy za měsíc | Omezeno na infra schopnosti | 100,000 (Free Plan) | -| Náklady na jeden dotaz | $0 | $0 | -| Infrastructure | Centralizovaný | Decentralizované | -| Geografická redundancy | $750+ Usd za další uzel | Zahrnuto | -| Provozuschopnost | Různé | 99.9%+ | -| Celkové měsíční náklady | $750+ | $0 | +| Srovnání nákladů | Vlastní hostitel | The Graph Network | +| :-------------------------: | :-------------------------------------: | :-----------------------------------------------------------: | +| Měsíční náklady na server\* | $350 měsíčně | $0 | +| Náklady na dotazování | $0+ | $0 per month | +| Inženýrský čas | $400 měsíčně | Žádné, zabudované do sítě s globálně distribuovanými indexery | +| Dotazy za měsíc | Omezeno na infra schopnosti | 100,000 (Free Plan) | +| Náklady na jeden dotaz | $0 | $0 | +| Infrastructure | Centralizovaný | Decentralizované | +| Geografická redundancy | $750+ Usd za další uzel | Zahrnuto | +| Provozuschopnost | Různé | 99.9%+ | +| Celkové měsíční náklady | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Srovnání nákladů | Vlastní hostitel | The Graph Network | -| :-: | :-: | :-: | -| Měsíční náklady na server\* | $350 měsíčně | $0 | -| Náklady na dotazování | $500 měsíčně | $120 per month | -| Inženýrský čas | $800 měsíčně | Žádné, zabudované do sítě s globálně distribuovanými indexery | -| Dotazy za měsíc | Omezeno na infra schopnosti | ~3,000,000 | -| Náklady na jeden dotaz | $0 | $0.00004 | -| Infrastructure | Centralizovaný | Decentralizované | -| Výdaje inženýrskou | $200 za hodinu | Zahrnuto | -| Geografická redundancy | $1,200 celkových nákladů na další uzel | Zahrnuto | -| Provozuschopnost | Různé | 99.9%+ | -| Celkové měsíční náklady | $1,650+ | $120 | +| Srovnání nákladů | 
Vlastní hostitel | The Graph Network | +| :-------------------------: | :----------------------------------------: | :-----------------------------------------------------------: | +| Měsíční náklady na server\* | $350 měsíčně | $0 | +| Náklady na dotazování | $500 měsíčně | $120 per month | +| Inženýrský čas | $800 měsíčně | Žádné, zabudované do sítě s globálně distribuovanými indexery | +| Dotazy za měsíc | Omezeno na infra schopnosti | ~3,000,000 | +| Náklady na jeden dotaz | $0 | $0.00004 | +| Infrastructure | Centralizovaný | Decentralizované | +| Výdaje inženýrskou | $200 za hodinu | Zahrnuto | +| Geografická redundancy | $1,200 celkových nákladů na další uzel | Zahrnuto | +| Provozuschopnost | Různé | 99.9%+ | +| Celkové měsíční náklady | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Srovnání nákladů | Vlastní hostitel | The Graph Network | -| :-: | :-: | :-: | -| Měsíční náklady na server\* | $1100 měsíčně za uzel | $0 | -| Náklady na dotazování | $4000 | $1,200 per month | -| Počet potřebných uzlů | 10 | Nepoužije se | -| Inženýrský čas | 6$, 000 nebo více měsíčně | Žádné, zabudované do sítě s globálně distribuovanými indexery | -| Dotazy za měsíc | Omezeno na infra schopnosti | ~30,000,000 | -| Náklady na jeden dotaz | $0 | $0.00004 | -| Infrastructure | Centralizovaný | Decentralizované | -| Geografická redundancy | $1,200 celkových nákladů na další uzel | Zahrnuto | -| Provozuschopnost | Různé | 99.9%+ | -| Celkové měsíční náklady | $11,000+ | $1,200 | +| Srovnání nákladů | Vlastní hostitel | The Graph Network | +| :-------------------------: | :-----------------------------------------: | :-----------------------------------------------------------: | +| Měsíční náklady na server\* | $1100 měsíčně za uzel | $0 | +| Náklady na dotazování | $4000 | $1,200 per month | +| Počet potřebných uzlů | 10 | Nepoužije se | +| Inženýrský čas | 6$, 000 nebo více měsíčně | Žádné, zabudované do sítě s globálně distribuovanými indexery | +| Dotazy za měsíc | Omezeno na infra schopnosti | ~30,000,000 | +| Náklady na jeden dotaz | $0 | $0.00004 | +| Infrastructure | Centralizovaný | Decentralizované | +| Geografická redundancy | $1,200 celkových nákladů na další uzel | Zahrnuto | +| Provozuschopnost | Různé | 99.9%+ | +| Celkové měsíční náklady | $11,000+ | $1,200 | \*včetně nákladů na zálohování: $50-$100 měsíčně @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -Kurátorování signálu na podgrafu je volitelný jednorázový čistý nulový náklad (např. na podgrafu lze kurátorovat signál v hodnotě $1k a později jej stáhnout - s potenciálem získat v tomto procesu výnosy). 
+Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## No Setup Costs & Greater Operational Efficiency @@ -89,4 +89,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From 3145ed35679565751341bdc7191986be524514e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:26 -0500 Subject: [PATCH 0455/1789] New translations benefits.mdx (German) --- website/src/pages/de/resources/benefits.mdx | 79 ++++++++++----------- 1 file changed, 39 insertions(+), 40 deletions(-) diff --git a/website/src/pages/de/resources/benefits.mdx b/website/src/pages/de/resources/benefits.mdx index 24c816c0784e..d20d0f5ae37a 100644 --- a/website/src/pages/de/resources/benefits.mdx +++ b/website/src/pages/de/resources/benefits.mdx @@ -27,58 +27,57 @@ Die Abfragekosten können variieren; die angegebenen Kosten sind der Durchschnit ## Benutzer mit geringem Volumen (weniger als 100.000 Abfragen pro Monat) -| Kostenvergleich | Selbst gehostet | The Graph Network | -| :-: | :-: | :-: | -| Monatliche Serverkosten\* | $350 pro Monat | $0 | -| Abfragekosten | $0+ | $0 pro Monat | -| Entwicklungszeit | $400 pro Monat | Keine, eingebaut in das Netzwerk mit global verteilten Indexern | -| Abfragen pro Monat | Begrenzt auf infrastrukturelle Funktionen | 100.000 (kostenloser Plan) | -| Kosten pro Abfrage | $0 | $0 | -| Infrastructure | Zentralisiert | Dezentralisiert | -| Geografische Redundanz | $750+ pro zusätzlichem Knoten | Eingeschlossen | -| Betriebszeit | Variiert | 99.9%+ | -| Monatliche Gesamtkosten | $750+ | $0 | +| Kostenvergleich | Selbst gehostet | The Graph Network | +| :--------------------------: | :---------------------------------------: | :-------------------------------------------------------------: | +| Monatliche Serverkosten\* | $350 pro Monat | $0 | +| Abfragekosten | $0+ | $0 pro Monat | +| Entwicklungszeit | $400 pro Monat | Keine, eingebaut in das Netzwerk mit global verteilten Indexern | +| Abfragen pro Monat | Begrenzt auf infrastrukturelle Funktionen | 100.000 (kostenloser Plan) | +| Kosten pro Abfrage | $0 | $0 | +| Infrastructure | Zentralisiert | Dezentralisiert | +| Geografische Redundanz | $750+ pro zusätzlichem Knoten | Eingeschlossen | +| Betriebszeit | Variiert | 99.9%+ | +| Monatliche Gesamtkosten | $750+ | $0 | ## Benutzer mit mittlerem Volumen (~3M Abfragen pro Monat) -| Kostenvergleich | Selbst gehostet | The Graph Network | -| :-: | :-: | :-: | -| Monatliche Serverkosten\* | $350 pro Monat | $0 | -| Abfragekosten | $500 pro Monat | $120 pro Monat | -| Entwicklungszeit | $800 pro Monat | Keine, eingebaut in das Netzwerk mit global verteilten Indexern | -| Abfragen pro Monat | Begrenzt auf infrastrukturelle Funktionen | ~3,000,000 | -| Kosten pro Abfrage | $0 | $0.00004 | -| Infrastructure | Zentralisiert | Dezentralisiert | -| Engineering-Kosten | $200 pro Stunde | Eingeschlossen | -| Geografische Redundanz | $1,200 Gesamtkosten pro zusätzlichem Knoten | Eingeschlossen | 
-| Betriebszeit | Variiert | 99.9%+ | -| Monatliche Gesamtkosten | $1.650+ | $120 | +| Kostenvergleich | Selbst gehostet | The Graph Network | +| :--------------------------: | :-----------------------------------------: | :-------------------------------------------------------------: | +| Monatliche Serverkosten\* | $350 pro Monat | $0 | +| Abfragekosten | $500 pro Monat | $120 pro Monat | +| Entwicklungszeit | $800 pro Monat | Keine, eingebaut in das Netzwerk mit global verteilten Indexern | +| Abfragen pro Monat | Begrenzt auf infrastrukturelle Funktionen | ~3,000,000 | +| Kosten pro Abfrage | $0 | $0.00004 | +| Infrastructure | Zentralisiert | Dezentralisiert | +| Engineering-Kosten | $200 pro Stunde | Eingeschlossen | +| Geografische Redundanz | $1,200 Gesamtkosten pro zusätzlichem Knoten | Eingeschlossen | +| Betriebszeit | Variiert | 99.9%+ | +| Monatliche Gesamtkosten | $1.650+ | $120 | ## Benutzer mit hohem Volumen (~30M Abfragen pro Monat) -| Kostenvergleich | Selbst gehostet | The Graph Network | -| :-: | :-: | :-: | -| Monatliche Serverkosten\* | $1100 pro Monat, pro Knoten | $0 | -| Abfragekosten | $4000 | $1,200 pro Monat | -| Anzahl der benötigten Knoten | 10 | Nicht anwendbar | -| Entwicklungszeit | $6,000 oder mehr pro Monat | Keine, eingebaut in das Netzwerk mit global verteilten Indexern | -| Abfragen pro Monat | Begrenzt auf infrastrukturelle Funktionen | ~30,000,000 | -| Kosten pro Abfrage | $0 | $0.00004 | -| Infrastructure | Zentralisiert | Dezentralisiert | -| Geografische Redundanz | $1,200 Gesamtkosten pro zusätzlichem Knoten | Eingeschlossen | -| Betriebszeit | Variiert | 99.9%+ | -| Monatliche Gesamtkosten | $11,000+ | $1,200 | +| Kostenvergleich | Selbst gehostet | The Graph Network | +| :--------------------------: | :-----------------------------------------: | :-------------------------------------------------------------: | +| Monatliche Serverkosten\* | $1100 pro Monat, pro Knoten | $0 | +| Abfragekosten | $4000 | $1,200 pro Monat | +| Anzahl der benötigten Knoten | 10 | Nicht anwendbar | +| Entwicklungszeit | $6,000 oder mehr pro Monat | Keine, eingebaut in das Netzwerk mit global verteilten Indexern | +| Abfragen pro Monat | Begrenzt auf infrastrukturelle Funktionen | ~30,000,000 | +| Kosten pro Abfrage | $0 | $0.00004 | +| Infrastructure | Zentralisiert | Dezentralisiert | +| Geografische Redundanz | $1,200 Gesamtkosten pro zusätzlichem Knoten | Eingeschlossen | +| Betriebszeit | Variiert | 99.9%+ | +| Monatliche Gesamtkosten | $11,000+ | $1,200 | \*einschließlich der Kosten für die Datensicherung: $50-$100 pro Monat Engineering-Zeit auf der Grundlage von 200 $ pro Stunde angenommen -Reflektiert die Kosten für den Datenkonsumenten. Für Abfragen im Rahmen des „Free Plan“ werden nach wie vor -Abfragegebühren an Indexer gezahlt. +Reflektiert die Kosten für den Datenkonsumenten. Für Abfragen im Rahmen des „Free Plan“ werden nach wie vor Abfragegebühren an Indexer gezahlt. -Die geschätzten Kosten gelten nur für Ethereum Mainnet Subgraphen - die Kosten sind noch höher, wenn man selbst einen `graph-node` in anderen Netzwerken hostet. Einige Nutzer müssen ihren Subgraphen möglicherweise auf eine neue Version aktualisieren. Aufgrund der Ethereum-Gas-Gebühren kostet ein Update zum Zeitpunkt des Schreibens ~$50. Beachten Sie, dass die Gasgebühren auf [Arbitrum](/archived/arbitrum/arbitrum-faq/) wesentlich niedriger sind als im Ethereum Mainnet. 
+Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -Das Kuratieren von Signalen auf einem Subgraphen ist eine optionale, einmalige Investition mit Netto-Nullkosten (z.B. können Signale im Wert von 1.000 Dollar auf einem Subgraphen kuratiert und später wieder abgezogen werden - mit dem Potenzial, dabei Renditen zu erzielen). +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## Keine Einrichtungskosten und größere betriebliche Effizienz @@ -90,4 +89,4 @@ Das dezentralisierte Netzwerk von The Graph bietet den Nutzern Zugang zu einer g Unterm Strich: Das The Graph Network ist kostengünstiger, einfacher zu benutzen und liefert bessere Ergebnisse als ein lokaler `graph-node`. -Beginnen Sie noch heute mit der Nutzung von The Graph Network und erfahren Sie, wie Sie [Ihren Subgraphут im dezentralen Netzwerk von The Graph veröffentlichen](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From b9e40934f75a718cccddd8c2e5b1074d0507e0ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:27 -0500 Subject: [PATCH 0456/1789] New translations benefits.mdx (Italian) --- website/src/pages/it/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/it/resources/benefits.mdx b/website/src/pages/it/resources/benefits.mdx index 01393da864a1..48c8f909359d 100644 --- a/website/src/pages/it/resources/benefits.mdx +++ b/website/src/pages/it/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Confronto costi | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Costo mensile del server\* | $350 al mese | $0 | -| Costi di query | $0+ | $0 per month | -| Tempo di progettazione | $400 al mese | Nessuno, integrato nella rete con indicizzatori distribuiti a livello globale | -| Query al mese | Limitato alle capacità di infra | 100,000 (Free Plan) | -| Costo per query | $0 | $0 | -| Infrastructure | Centralizzato | Decentralizzato | -| Ridondanza geografica | $750+ per nodo aggiuntivo | Incluso | -| Tempo di attività | Variabile | 99.9%+ | -| Costo totale mensile | $750+ | $0 | +| Confronto costi | Self Hosted | The Graph Network | +| :--------------------------------: | :-------------------------------------: | :---------------------------------------------------------------------------: | +| Costo mensile del server\* | $350 al mese | $0 | +| Costi di query | $0+ | $0 per month | +| Tempo di progettazione | $400 al mese | Nessuno, integrato nella rete con indicizzatori distribuiti a livello globale | +| Query al mese | Limitato alle capacità di infra | 100,000 (Free Plan) | +| Costo per query | $0 | $0 | +| Infrastructure | Centralizzato | Decentralizzato | +| Ridondanza geografica | $750+ per nodo aggiuntivo | Incluso | +| Tempo di attività | Variabile | 99.9%+ | +| Costo totale mensile | $750+ | $0 
| ## Medium Volume User (~3M queries per month) -| Confronto costi | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Costo mensile del server\* | $350 al mese | $0 | -| Costi di query | $500 al mese | $120 per month | -| Tempo di progettazione | $800 al mese | Nessuno, integrato nella rete con indicizzatori distribuiti a livello globale | -| Query al mese | Limitato alle capacità di infra | ~3,000,000 | -| Costo per query | $0 | $0.00004 | -| Infrastructure | Centralizzato | Decentralizzato | -| Costi di ingegneria | $200 all'ora | Incluso | -| Ridondanza geografica | $1.200 di costi totali per nodo aggiuntivo | Incluso | -| Tempo di attività | Variabile | 99.9%+ | -| Costo totale mensile | $1,650+ | $120 | +| Confronto costi | Self Hosted | The Graph Network | +| :--------------------------------: | :----------------------------------------: | :---------------------------------------------------------------------------: | +| Costo mensile del server\* | $350 al mese | $0 | +| Costi di query | $500 al mese | $120 per month | +| Tempo di progettazione | $800 al mese | Nessuno, integrato nella rete con indicizzatori distribuiti a livello globale | +| Query al mese | Limitato alle capacità di infra | ~3,000,000 | +| Costo per query | $0 | $0.00004 | +| Infrastructure | Centralizzato | Decentralizzato | +| Costi di ingegneria | $200 all'ora | Incluso | +| Ridondanza geografica | $1.200 di costi totali per nodo aggiuntivo | Incluso | +| Tempo di attività | Variabile | 99.9%+ | +| Costo totale mensile | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Confronto costi | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Costo mensile del server\* | $1100 al mese, per nodo | $0 | -| Costi di query | $4000 | $1,200 per month | -| Numero di nodi necessari | 10 | Non applicabile | -| Tempo di progettazione | $6.000 o più al mese | Nessuno, integrato nella rete con indicizzatori distribuiti a livello globale | -| Query al mese | Limitato alle capacità di infra | ~30,000,000 | -| Costo per query | $0 | $0.00004 | -| Infrastructure | Centralizzato | Decentralizzato | -| Ridondanza geografica | $1.200 di costi totali per nodo aggiuntivo | Incluso | -| Tempo di attività | Variabile | 99.9%+ | -| Costo totale mensile | $11,000+ | $1,200 | +| Confronto costi | Self Hosted | The Graph Network | +| :--------------------------------: | :-----------------------------------------: | :---------------------------------------------------------------------------: | +| Costo mensile del server\* | $1100 al mese, per nodo | $0 | +| Costi di query | $4000 | $1,200 per month | +| Numero di nodi necessari | 10 | Non applicabile | +| Tempo di progettazione | $6.000 o più al mese | Nessuno, integrato nella rete con indicizzatori distribuiti a livello globale | +| Query al mese | Limitato alle capacità di infra | ~30,000,000 | +| Costo per query | $0 | $0.00004 | +| Infrastructure | Centralizzato | Decentralizzato | +| Ridondanza geografica | $1.200 di costi totali per nodo aggiuntivo | Incluso | +| Tempo di attività | Variabile | 99.9%+ | +| Costo totale mensile | $11,000+ | $1,200 | \*inclusi i costi per il backup: $50-$100 al mese @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. 
Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -La curation del segnale su un subgraph è opzionale, una tantum, a costo zero (ad esempio, $1.000 in segnale possono essere curati su un subgraph e successivamente ritirati, con un potenziale di guadagno nel processo). +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## No Setup Costs & Greater Operational Efficiency @@ -89,4 +89,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From 899a67d3c0fc29e1e55c918b29e18b490fae75db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:28 -0500 Subject: [PATCH 0457/1789] New translations benefits.mdx (Japanese) --- website/src/pages/ja/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/ja/resources/benefits.mdx b/website/src/pages/ja/resources/benefits.mdx index f3c7204743fb..8a86396805ea 100644 --- a/website/src/pages/ja/resources/benefits.mdx +++ b/website/src/pages/ja/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| コスト比較 | セルフホスト | グラフネットワーク | -| :-: | :-: | :-: | -| 月額サーバー代 | $350/月 | $0 | -| クエリコスト | $0+ | $0 per month | -| エンジニアリングタイム | $400/月 | なし/ グローバルに分散されたインデクサーでネットワークに組み込まれる | -| 月ごとのクエリ | インフラ機能に限定 | 100,000 (Free Plan) | -| クエリごとのコスト | $0 | $0 | -| Infrastructure | 集中管理型 | 分散型 | -| 地理的な冗長性 | 追加1ノードにつき$750+ | 含まれる | -| アップタイム | バリエーション | 99.9%+ | -| 月額費用合計 | $750+ | $0 | +| コスト比較 | セルフホスト | グラフネットワーク | +| :---------------------: | :-------------------------------------: | :---------------------------------: | +| 月額サーバー代 | $350/月 | $0 | +| クエリコスト | $0+ | $0 per month | +| エンジニアリングタイム | $400/月 | なし/ グローバルに分散されたインデクサーでネットワークに組み込まれる | +| 月ごとのクエリ | インフラ機能に限定 | 100,000 (Free Plan) | +| クエリごとのコスト | $0 | $0 | +| Infrastructure | 集中管理型 | 分散型 | +| 地理的な冗長性 | 追加1ノードにつき$750+ | 含まれる | +| アップタイム | バリエーション | 99.9%+ | +| 月額費用合計 | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| コスト比較 | セルフホスト | グラフネットワーク | -| :-: | :-: | :-: | -| 月額サーバー代 | $350/月 | $0 | -| クエリコスト | $500/月 | $120 per month | -| エンジニアリングタイム | $800/月 | なし/ グローバルに分散されたインデクサーでネットワークに組み込まれる | -| 月ごとのクエリ | インフラ機能に限定 | ~3,000,000 | -| クエリごとのコスト | $0 | $0.00004 | -| Infrastructure | 集中管理型 | 分散型 | -| エンジニアリングコスト | $200/時 | 含まれる | -| 
地理的な冗長性 | ノード追加1台につき合計1,200ドル | 含まれる | -| アップタイム | バリエーション | 99.9%+ | -| 月額費用合計 | $1,650+ | $120 | +| コスト比較 | セルフホスト | グラフネットワーク | +| :---------------------: | :----------------------------------------: | :---------------------------------: | +| 月額サーバー代 | $350/月 | $0 | +| クエリコスト | $500/月 | $120 per month | +| エンジニアリングタイム | $800/月 | なし/ グローバルに分散されたインデクサーでネットワークに組み込まれる | +| 月ごとのクエリ | インフラ機能に限定 | ~3,000,000 | +| クエリごとのコスト | $0 | $0.00004 | +| Infrastructure | 集中管理型 | 分散型 | +| エンジニアリングコスト | $200/時 | 含まれる | +| 地理的な冗長性 | ノード追加1台につき合計1,200ドル | 含まれる | +| アップタイム | バリエーション | 99.9%+ | +| 月額費用合計 | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| コスト比較 | セルフホスト | グラフネットワーク | -| :-: | :-: | :-: | -| 月額サーバー代 | $1100/月(ノードごと) | $0 | -| クエリコスト | $4000 | $1,200 per month | -| 必要ノード数 | 10 | 該当なし | -| エンジニアリングタイム | $6,000/月 | なし/ グローバルに分散されたインデクサーでネットワークに組み込まれる | -| 月ごとのクエリ | インフラ機能に限定 | ~30,000,000 | -| クエリごとのコスト | $0 | $0.00004 | -| Infrastructure | 集中管理型 | 分散型 | -| 地理的な冗長性 | ノード追加1台につき合計1,200ドル | 含まれる | -| アップタイム | バリエーション | 99.9%+ | -| 月額費用合計 | $11,000+ | $1,200 | +| コスト比較 | セルフホスト | グラフネットワーク | +| :---------------------: | :-----------------------------------------: | :---------------------------------: | +| 月額サーバー代 | $1100/月(ノードごと) | $0 | +| クエリコスト | $4000 | $1,200 per month | +| 必要ノード数 | 10 | 該当なし | +| エンジニアリングタイム | $6,000/月 | なし/ グローバルに分散されたインデクサーでネットワークに組み込まれる | +| 月ごとのクエリ | インフラ機能に限定 | ~30,000,000 | +| クエリごとのコスト | $0 | $0.00004 | +| Infrastructure | 集中管理型 | 分散型 | +| 地理的な冗長性 | ノード追加1台につき合計1,200ドル | 含まれる | +| アップタイム | バリエーション | 99.9%+ | +| 月額費用合計 | $11,000+ | $1,200 | \*バックアップ費用含む:月額$50〜$100 @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -サブグラフ上のシグナルのキュレーションは、オプションで1回限り、ネットゼロのコストで可能です(例えば、$1,000のシグナルをサブグラフ上でキュレーションし、後で引き出すことができ、その過程でリターンを得る可能性があります)。 +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## No Setup Costs & Greater Operational Efficiency @@ -89,4 +89,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). 
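
Note on the figures above (illustrative only, not part of any patch in this series): the monthly totals quoted in every localized benefits.mdx table follow from simple per-query arithmetic at the listed $0.00004 rate — about 3,000,000 queries comes to roughly $120 per month and about 30,000,000 to roughly $1,200. A minimal TypeScript sketch of that calculation; the function name and rounding are assumptions for illustration, not anything defined in the translated files:

```typescript
// Per-query cost arithmetic behind the tier totals quoted in benefits.mdx.
// Rate and volumes are taken from the tables above; everything else is assumed.
const COST_PER_QUERY_USD = 0.00004;

function monthlyQueryCost(queriesPerMonth: number): number {
  // Round to whole dollars to match how the tables present the totals.
  return Math.round(queriesPerMonth * COST_PER_QUERY_USD);
}

console.log(monthlyQueryCost(3_000_000));  // ≈ 120  -> "$120 per month" tier
console.log(monthlyQueryCost(30_000_000)); // ≈ 1200 -> "$1,200 per month" tier
```
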
From ab179513c448cf80c76bc62f6982540ba4269fd8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:29 -0500 Subject: [PATCH 0458/1789] New translations benefits.mdx (Korean) --- website/src/pages/ko/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/ko/resources/benefits.mdx b/website/src/pages/ko/resources/benefits.mdx index 06b1b5594b1f..1c264a6a72b9 100644 --- a/website/src/pages/ko/resources/benefits.mdx +++ b/website/src/pages/ko/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Cost Comparison | Self Hosted | The Graph 네트워크 | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | $0 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | -| Cost per query | $0 | $0 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | $0 | +| Cost Comparison | Self Hosted | The Graph 네트워크 | +| :--------------------------: | :-------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $0+ | $0 per month | +| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | +| Cost per query | $0 | $0 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $750+ per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Cost Comparison | Self Hosted | The Graph 네트워크 | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $120 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~3,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $120 | +| Cost Comparison | Self Hosted | The Graph 네트워크 | +| :--------------------------: | :----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $500 per month | $120 per month | +| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~3,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Engineering expense | $200 per hour | Included | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Cost Comparison | Self Hosted | The Graph 네트워크 | -| :-: | :-: | :-: | -| Monthly 
server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $1,200 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~30,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $11,000+ | $1,200 | +| Cost Comparison | Self Hosted | The Graph 네트워크 | +| :--------------------------: | :-----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $1100 per month, per node | $0 | +| Query costs | $4000 | $1,200 per month | +| Number of nodes needed | 10 | Not applicable | +| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~30,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $11,000+ | $1,200 | \*including costs for backup: $50-$100 per month @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -Curating signal on a subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a subgraph, and later withdrawn—with potential to earn returns in the process). +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## No Setup Costs & Greater Operational Efficiency @@ -89,4 +89,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). 
From cced800507797afe2eb60da9a80524c6e026a239 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:30 -0500 Subject: [PATCH 0459/1789] New translations benefits.mdx (Dutch) --- website/src/pages/nl/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/nl/resources/benefits.mdx b/website/src/pages/nl/resources/benefits.mdx index c02a029cb137..6a3068d67e2c 100644 --- a/website/src/pages/nl/resources/benefits.mdx +++ b/website/src/pages/nl/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Kostenvergelijking | Zelf hosten | De Graph Netwerk | -| :-: | :-: | :-: | -| Maandelijkse serverkosten | $350 per maand | $0 | -| Querykosten | $0+ | $0 per month | -| Onderhoud tijd | $400 per maand | Geen, deze kosten worden opgevangen door het wereldwijd gedistribueerde netwerk van indexeerders | -| Aantal queries per maand | Beperkt tot infrastructuurcapaciteiten | 100,000 (Free Plan) | -| Kosten per query | $0 | $0 | -| Infrastructure | Gecentraliseerd | Gedecentraliseerd | -| Geografische redundantie | $750+ per extra node | Inbegrepen | -| Uptime | Wisselend | 99,9%+ | -| Totale maandelijkse kosten | $750+ | $0 | +| Kostenvergelijking | Zelf hosten | De Graph Netwerk | +| :------------------------: | :-------------------------------------: | :----------------------------------------------------------------------------------------------: | +| Maandelijkse serverkosten | $350 per maand | $0 | +| Querykosten | $0+ | $0 per month | +| Onderhoud tijd | $400 per maand | Geen, deze kosten worden opgevangen door het wereldwijd gedistribueerde netwerk van indexeerders | +| Aantal queries per maand | Beperkt tot infrastructuurcapaciteiten | 100,000 (Free Plan) | +| Kosten per query | $0 | $0 | +| Infrastructure | Gecentraliseerd | Gedecentraliseerd | +| Geografische redundantie | $750+ per extra node | Inbegrepen | +| Uptime | Wisselend | 99,9%+ | +| Totale maandelijkse kosten | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Kostenvergelijking | Zelf hosten | De Graph Netwerk | -| :-: | :-: | :-: | -| Maandelijkse serverkosten | $350 per maand | $0 | -| Querykosten | $500 per maand | $120 per month | -| Onderhoud tijd | $800 per maand | Geen, deze kosten worden opgevangen door het wereldwijd gedistribueerde netwerk van indexeerders | -| Aantal queries per maand | Beperkt tot infrastructuurcapaciteiten | ~3,000,000 | -| Kosten per query | $0 | $0.00004 | -| Infrastructure | Gecentraliseerd | Gedecentraliseerd | -| Technische personeelskosten | $200 per uur | Inbegrepen | -| Geografische redundantie | $1200 totale kosten per extra node | Inbegrepen | -| Uptime | Wisselend | 99,9%+ | -| Totale maandelijkse kosten | $1,650+ | $120 | +| Kostenvergelijking | Zelf hosten | De Graph Netwerk | +| :-------------------------: | :----------------------------------------: | :----------------------------------------------------------------------------------------------: | +| Maandelijkse serverkosten | $350 per maand | $0 | +| Querykosten | $500 per maand | $120 per month | +| Onderhoud tijd | $800 per maand | Geen, deze kosten worden opgevangen door het wereldwijd gedistribueerde netwerk van indexeerders | +| Aantal queries per maand | Beperkt tot infrastructuurcapaciteiten | ~3,000,000 | +| Kosten per query | $0 | $0.00004 | +| Infrastructure | Gecentraliseerd 
| Gedecentraliseerd | +| Technische personeelskosten | $200 per uur | Inbegrepen | +| Geografische redundantie | $1200 totale kosten per extra node | Inbegrepen | +| Uptime | Wisselend | 99,9%+ | +| Totale maandelijkse kosten | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Kostenvergelijking | Zelf hosten | De Graph Netwerk | -| :-: | :-: | :-: | -| Maandelijkse serverkosten | $1100 per maand, per node | $0 | -| Querykosten | $4000 | $1,200 per month | -| Aantal benodigde nodes | 10 | Niet van toepassing | -| Onderhoud tijd | $6000 of meer per maand | Geen, deze kosten worden opgevangen door het wereldwijd gedistribueerde netwerk van indexeerders | -| Aantal queries per maand | Beperkt tot infrastructuurcapaciteiten | ~30,000,000 | -| Kosten per query | $0 | $0.00004 | -| Infrastructure | Gecentraliseerd | Gedecentraliseerd | -| Geografische redundantie | $1200 totale kosten per extra node | Inbegrepen | -| Uptime | Wisselend | 99,9%+ | -| Totale maandelijkse kosten | $11,000+ | $1,200 | +| Kostenvergelijking | Zelf hosten | De Graph Netwerk | +| :------------------------: | :-----------------------------------------: | :----------------------------------------------------------------------------------------------: | +| Maandelijkse serverkosten | $1100 per maand, per node | $0 | +| Querykosten | $4000 | $1,200 per month | +| Aantal benodigde nodes | 10 | Niet van toepassing | +| Onderhoud tijd | $6000 of meer per maand | Geen, deze kosten worden opgevangen door het wereldwijd gedistribueerde netwerk van indexeerders | +| Aantal queries per maand | Beperkt tot infrastructuurcapaciteiten | ~30,000,000 | +| Kosten per query | $0 | $0.00004 | +| Infrastructure | Gecentraliseerd | Gedecentraliseerd | +| Geografische redundantie | $1200 totale kosten per extra node | Inbegrepen | +| Uptime | Wisselend | 99,9%+ | +| Totale maandelijkse kosten | $11,000+ | $1,200 | \*inclusief kosten voor een back-up: $50-$100 per maand @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -Signaal cureren op een subgraph is een optionele eenmalige, kostenneutrale actie (bijv. $1000 aan signaal kan worden gecureerd op een subgraph en later worden opgenomen - met het potentieel om rendementen te verdienen tijdens het proces). +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). 
## No Setup Costs & Greater Operational Efficiency @@ -89,4 +89,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From 9938147a2c409f0229ed126037ed88ddf93e0518 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:31 -0500 Subject: [PATCH 0460/1789] New translations benefits.mdx (Polish) --- website/src/pages/pl/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/pl/resources/benefits.mdx b/website/src/pages/pl/resources/benefits.mdx index d788b11bcd7a..8eb098f7a76c 100644 --- a/website/src/pages/pl/resources/benefits.mdx +++ b/website/src/pages/pl/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Cost Comparison | Self Hosted | Sieć The Graph | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | $0 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | -| Cost per query | $0 | $0 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | $0 | +| Cost Comparison | Self Hosted | Sieć The Graph | +| :--------------------------: | :-------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $0+ | $0 per month | +| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | +| Cost per query | $0 | $0 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $750+ per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Cost Comparison | Self Hosted | Sieć The Graph | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $120 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~3,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $120 | +| Cost Comparison | Self Hosted | Sieć The Graph | +| :--------------------------: | :----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $500 per month | $120 per month | +| Engineering time | $800 
per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~3,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Engineering expense | $200 per hour | Included | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Cost Comparison | Self Hosted | Sieć The Graph | -| :-: | :-: | :-: | -| Monthly server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $1,200 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~30,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $11,000+ | $1,200 | +| Cost Comparison | Self Hosted | Sieć The Graph | +| :--------------------------: | :-----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $1100 per month, per node | $0 | +| Query costs | $4000 | $1,200 per month | +| Number of nodes needed | 10 | Not applicable | +| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~30,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $11,000+ | $1,200 | \*including costs for backup: $50-$100 per month @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -Curating signal on a subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a subgraph, and later withdrawn—with potential to earn returns in the process). +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). 
## No Setup Costs & Greater Operational Efficiency @@ -89,4 +89,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From b17f55d885f42a70297037d6949c832ecbd3fb4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:32 -0500 Subject: [PATCH 0461/1789] New translations benefits.mdx (Portuguese) --- website/src/pages/pt/resources/benefits.mdx | 79 ++++++++++----------- 1 file changed, 39 insertions(+), 40 deletions(-) diff --git a/website/src/pages/pt/resources/benefits.mdx b/website/src/pages/pt/resources/benefits.mdx index 536f02bd4a05..adb3f4feb191 100644 --- a/website/src/pages/pt/resources/benefits.mdx +++ b/website/src/pages/pt/resources/benefits.mdx @@ -27,58 +27,57 @@ Os custos de query podem variar; o custo citado é o normal até o fechamento da ## Utilizador de Baixo Volume (menos de 100 mil queries por mês) -| Comparação de Custos | Auto-hospedagem | The Graph Network | -| :-: | :-: | :-: | -| Custo mensal de servidor\* | $350 por mês | $0 | -| Custos de query | $0+ | $0 por mês | -| Tempo de engenharia | $400 por mês | Nenhum, embutido na rede com Indexadores distribuídos globalmente | -| Queries por mês | Limitadas pelas capabilidades da infra | 100 mil (Plano Grátis) | -| Custo por query | $0 | $0 | -| Infrastructure | Centralizada | Descentralizada | -| Redundância geográfica | $750+ por node adicional | Incluída | -| Uptime (disponibilidade) | Varia | 99.9%+ | -| Custos mensais totais | $750+ | $0 | +| Comparação de Custos | Auto-hospedagem | The Graph Network | +| :-----------------------------: | :-------------------------------------: | :---------------------------------------------------------------: | +| Custo mensal de servidor\* | $350 por mês | $0 | +| Custos de query | $0+ | $0 por mês | +| Tempo de engenharia | $400 por mês | Nenhum, embutido na rede com Indexadores distribuídos globalmente | +| Queries por mês | Limitadas pelas capabilidades da infra | 100 mil (Plano Grátis) | +| Custo por query | $0 | $0 | +| Infrastructure | Centralizada | Descentralizada | +| Redundância geográfica | $750+ por node adicional | Incluída | +| Uptime (disponibilidade) | Varia | 99.9%+ | +| Custos mensais totais | $750+ | $0 | ## Utilizador de Volume Médio (cerca de 3 milhões de queries por mês) -| Comparação de Custos | Auto-hospedagem | The Graph Network | -| :-: | :-: | :-: | -| Custo mensal de servidor\* | $350 por mês | $0 | -| Custos de query | $500 por mês | $120 por mês | -| Tempo de engenharia | $800 por mês | Nenhum, embutido na rede com Indexadores distribuídos globalmente | -| Queries por mês | Limitadas pelas capabilidades da infra | ~3 milhões | -| Custo por query | $0 | $0.00004 | -| Infrastructure | Centralizada | Descentralizada | -| Custo de engenharia | $200 por hora | Incluída | -| Redundância geográfica | $1.200 em custos totais por node adicional | Incluída | -| Uptime (disponibilidade) | Varia | 99.9%+ | -| Custos mensais totais | $1.650+ | $120 | +| Comparação de Custos | Auto-hospedagem | The Graph Network | +| :-----------------------------: | 
:----------------------------------------: | :---------------------------------------------------------------: | +| Custo mensal de servidor\* | $350 por mês | $0 | +| Custos de query | $500 por mês | $120 por mês | +| Tempo de engenharia | $800 por mês | Nenhum, embutido na rede com Indexadores distribuídos globalmente | +| Queries por mês | Limitadas pelas capabilidades da infra | ~3 milhões | +| Custo por query | $0 | $0.00004 | +| Infrastructure | Centralizada | Descentralizada | +| Custo de engenharia | $200 por hora | Incluída | +| Redundância geográfica | $1.200 em custos totais por node adicional | Incluída | +| Uptime (disponibilidade) | Varia | 99.9%+ | +| Custos mensais totais | $1.650+ | $120 | ## Utilizador de Volume Alto (cerca de 30 milhões de queries por mês) -| Comparação de Custos | Auto-hospedagem | The Graph Network | -| :-: | :-: | :-: | -| Custo mensal de servidor\* | $1.100 por mês, por node | $0 | -| Custos de query | $4.000 | $1,200 por mês | -| Número de nodes necessário | 10 | Não se aplica | -| Tempo de engenharia | $6.000 ou mais por mês | Nenhum, embutido na rede com Indexadores distribuídos globalmente | -| Queries por mês | Limitadas pelas capabilidades da infra | Cerca de 30 milhões | -| Custo por query | $0 | $0.00004 | -| Infrastructure | Centralizada | Descentralizada | -| Redundância geográfica | $1.200 em custos totais por node adicional | Incluída | -| Uptime (disponibilidade) | Varia | 99.9%+ | -| Custos mensais totais | $11.000+ | $1.200 | +| Comparação de Custos | Auto-hospedagem | The Graph Network | +| :-----------------------------: | :-----------------------------------------: | :---------------------------------------------------------------: | +| Custo mensal de servidor\* | $1.100 por mês, por node | $0 | +| Custos de query | $4.000 | $1,200 por mês | +| Número de nodes necessário | 10 | Não se aplica | +| Tempo de engenharia | $6.000 ou mais por mês | Nenhum, embutido na rede com Indexadores distribuídos globalmente | +| Queries por mês | Limitadas pelas capabilidades da infra | Cerca de 30 milhões | +| Custo por query | $0 | $0.00004 | +| Infrastructure | Centralizada | Descentralizada | +| Redundância geográfica | $1.200 em custos totais por node adicional | Incluída | +| Uptime (disponibilidade) | Varia | 99.9%+ | +| Custos mensais totais | $11.000+ | $1.200 | \*com custos de backup incluídos: $50-$100 por mês Tempo de engenharia baseado numa hipótese de $200 por hora -Reflete o custo ao consumidor de dados. Taxas de query ainda são pagas a Indexadores por queries do Plano -Grátis. +Reflete o custo ao consumidor de dados. Taxas de query ainda são pagas a Indexadores por queries do Plano Grátis. -Os custos estimados são apenas para subgraphs na Mainnet do Ethereum — os custos são maiores ao auto-hospedar um graph-node em outras redes. Alguns utilizadores devem atualizar o seu subgraph a uma versão mais recente. Até o fechamento deste texto, devido às taxas de gas do Ethereum, uma atualização custa cerca de 50 dólares. Note que as taxas de gás no [Arbitrum](/archived/arbitrum/arbitrum-faq/) são muito menores que as da mainnet do Ethereum. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. 
-Curar um sinal em um subgraph é um custo opcional, único, e zero-líquido (por ex., $1 mil em um subgraph pode ser curado em um subgraph, e depois retirado — com potencial para ganhar retornos no processo). +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## Zero Custos de Preparação e Mais Eficiência Operacional @@ -90,4 +89,4 @@ A rede descentralizada do The Graph permite que os utilizadores acessem redundâ Enfim: A Graph Network é mais barata e fácil de usar, e produz resultados melhores comparados à execução local de um graph-node. -Comece a usar a Graph Network hoje, e aprenda como [editar o seu subgraph na rede descentralizada do The Graph](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From 0cf8eaeb507f495744b59faed2792c69f7d6451e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:33 -0500 Subject: [PATCH 0462/1789] New translations benefits.mdx (Russian) --- website/src/pages/ru/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/ru/resources/benefits.mdx b/website/src/pages/ru/resources/benefits.mdx index df6eeac7c628..e5689bcb5f5a 100644 --- a/website/src/pages/ru/resources/benefits.mdx +++ b/website/src/pages/ru/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Сравнение затрат | Самостоятельный хостинг | Сеть The Graph | -| :-: | :-: | :-: | -| Ежемесячная стоимость сервера\* | $350 в месяц | $0 | -| Стоимость запроса | $0+ | $0 per month | -| Время разработки | $400 в месяц | Нет, встроен в сеть с глобально распределенными Индексаторами | -| Запросы в месяц | Ограничен возможностями инфраструктуры | 100,000 (Free Plan) | -| Стоимость одного запроса | $0 | $0 | -| Infrastructure | Централизованная | Децентрализованная | -| Географическая избыточность | $750+ за каждую дополнительную ноду | Включено | -| Время безотказной работы | Варьируется | 99.9%+ | -| Общие ежемесячные расходы | $750+ | $0 | +| Сравнение затрат | Самостоятельный хостинг | Сеть The Graph | +| :-----------------------------: | :-------------------------------------: | :-----------------------------------------------------------: | +| Ежемесячная стоимость сервера\* | $350 в месяц | $0 | +| Стоимость запроса | $0+ | $0 per month | +| Время разработки | $400 в месяц | Нет, встроен в сеть с глобально распределенными Индексаторами | +| Запросы в месяц | Ограничен возможностями инфраструктуры | 100,000 (Free Plan) | +| Стоимость одного запроса | $0 | $0 | +| Infrastructure | Централизованная | Децентрализованная | +| Географическая избыточность | $750+ за каждую дополнительную ноду | Включено | +| Время безотказной работы | Варьируется | 99.9%+ | +| Общие ежемесячные расходы | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Сравнение затрат | Самостоятельный хостинг | Сеть The Graph | -| :-: | :-: | :-: | -| Ежемесячная стоимость сервера\* | $350 в месяц | $0 | -| Стоимость запроса | $500 в месяц | $120 per month | -| Время разработки | $800 в месяц | Нет, встроен в сеть с глобально распределенными Индексаторами | -| Запросы в месяц | Ограничен возможностями инфраструктуры | ~3,000,000 | -| 
Стоимость одного запроса | $0 | $0.00004 | -| Infrastructure | Централизованная | Децентрализованная | -| Инженерные расходы | $200 в час | Включено | -| Географическая избыточность | общие затраты на каждую дополнительную ноду составляют $1,200 | Включено | -| Время безотказной работы | Варьируется | 99.9%+ | -| Общие ежемесячные расходы | $1,650+ | $120 | +| Сравнение затрат | Самостоятельный хостинг | Сеть The Graph | +| :-----------------------------: | :-----------------------------------------------------------: | :-----------------------------------------------------------: | +| Ежемесячная стоимость сервера\* | $350 в месяц | $0 | +| Стоимость запроса | $500 в месяц | $120 per month | +| Время разработки | $800 в месяц | Нет, встроен в сеть с глобально распределенными Индексаторами | +| Запросы в месяц | Ограничен возможностями инфраструктуры | ~3,000,000 | +| Стоимость одного запроса | $0 | $0.00004 | +| Infrastructure | Централизованная | Децентрализованная | +| Инженерные расходы | $200 в час | Включено | +| Географическая избыточность | общие затраты на каждую дополнительную ноду составляют $1,200 | Включено | +| Время безотказной работы | Варьируется | 99.9%+ | +| Общие ежемесячные расходы | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Сравнение затрат | Самостоятельный хостинг | Сеть The Graph | -| :-: | :-: | :-: | -| Ежемесячная стоимость сервера\* | $1100 в месяц за ноду | $0 | -| Стоимость запроса | $4000 | $1,200 per month | -| Необходимое количество нод | 10 | Не подходит | -| Время разработки | $6,000 или больше в месяц | Нет, встроен в сеть с глобально распределенными Индексаторами | -| Запросы в месяц | Ограничен возможностями инфраструктуры | ~30,000,000 | -| Стоимость одного запроса | $0 | $0.00004 | -| Infrastructure | Централизованная | Децентрализованная | -| Географическая избыточность | общие затраты на каждую дополнительную ноду составляют $1,200 | Включено | -| Время безотказной работы | Варьируется | 99.9%+ | -| Общие ежемесячные расходы | $11,000+ | $1,200 | +| Сравнение затрат | Самостоятельный хостинг | Сеть The Graph | +| :-----------------------------: | :-----------------------------------------------------------: | :-----------------------------------------------------------: | +| Ежемесячная стоимость сервера\* | $1100 в месяц за ноду | $0 | +| Стоимость запроса | $4000 | $1,200 per month | +| Необходимое количество нод | 10 | Не подходит | +| Время разработки | $6,000 или больше в месяц | Нет, встроен в сеть с глобально распределенными Индексаторами | +| Запросы в месяц | Ограничен возможностями инфраструктуры | ~30,000,000 | +| Стоимость одного запроса | $0 | $0.00004 | +| Infrastructure | Централизованная | Децентрализованная | +| Географическая избыточность | общие затраты на каждую дополнительную ноду составляют $1,200 | Включено | +| Время безотказной работы | Варьируется | 99.9%+ | +| Общие ежемесячные расходы | $11,000+ | $1,200 | \* включая расходы на резервное копирование: $50-$100 в месяц @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. 
Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -Курирование сигнала на субграфе - это необязательная единовременная стоимость, равная нулю (например, сигнал стоимостью 1 тыс. долларов может быть курирован на субграфе, а затем отозван - с возможностью получения прибыли в процессе). +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## No Setup Costs & Greater Operational Efficiency @@ -89,4 +89,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From 93b7c82738e233b5222c0e3b7e70f68a9945c030 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:34 -0500 Subject: [PATCH 0463/1789] New translations benefits.mdx (Swedish) --- website/src/pages/sv/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/sv/resources/benefits.mdx b/website/src/pages/sv/resources/benefits.mdx index b3c5e957cb54..f227edf6f961 100644 --- a/website/src/pages/sv/resources/benefits.mdx +++ b/website/src/pages/sv/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Kostnadsjämförelse | Egen Värd | The Graph Nätverk | -| :-: | :-: | :-: | -| Månatlig kostnad för server\* | $350 per månad | $0 | -| Kostnad för frågor | $0+ | $0 per month | -| Konstruktionstid | $400 per månad | Ingen, inbyggd i nätverket med globalt distribuerade Indexers | -| Frågor per månad | Begränsad till infra kapacitet | 100,000 (Free Plan) | -| Kostnad per fråga | $0 | $0 | -| Infrastructure | Centraliserad | Decentraliserad | -| Geografisk redundans | $750+ per extra nod | Inkluderat | -| Drifttid | Varierande | 99.9%+ | -| Total Månadskostnad | $750+ | $0 | +| Kostnadsjämförelse | Egen Värd | The Graph Nätverk | +| :---------------------------: | :-------------------------------------: | :-----------------------------------------------------------: | +| Månatlig kostnad för server\* | $350 per månad | $0 | +| Kostnad för frågor | $0+ | $0 per month | +| Konstruktionstid | $400 per månad | Ingen, inbyggd i nätverket med globalt distribuerade Indexers | +| Frågor per månad | Begränsad till infra kapacitet | 100,000 (Free Plan) | +| Kostnad per fråga | $0 | $0 | +| Infrastructure | Centraliserad | Decentraliserad | +| Geografisk redundans | $750+ per extra nod | Inkluderat | +| Drifttid | Varierande | 99.9%+ | +| Total Månadskostnad | $750+ | $0 | ## Medium Volume User (~3M queries per month) 
-| Kostnadsjämförelse | Egen Värd | The Graph Nätverk | -| :-: | :-: | :-: | -| Månatlig kostnad för server\* | $350 per månad | $0 | -| Kostnad för frågor | $500 per månad | $120 per month | -| Konstruktionstid | $800 per månad | Ingen, inbyggd i nätverket med globalt distribuerade Indexers | -| Frågor per månad | Begränsad till infra kapacitet | ~3,000,000 | -| Kostnad per fråga | $0 | $0.00004 | -| Infrastructure | Centraliserad | Decentraliserad | -| Kostnader för ingenjörsarbete | $200 per timme | Inkluderat | -| Geografisk redundans | $1,200 i totala kostnader per extra nod | Inkluderat | -| Drifttid | Varierande | 99.9%+ | -| Total Månadskostnad | $1,650+ | $120 | +| Kostnadsjämförelse | Egen Värd | The Graph Nätverk | +| :---------------------------: | :----------------------------------------: | :-----------------------------------------------------------: | +| Månatlig kostnad för server\* | $350 per månad | $0 | +| Kostnad för frågor | $500 per månad | $120 per month | +| Konstruktionstid | $800 per månad | Ingen, inbyggd i nätverket med globalt distribuerade Indexers | +| Frågor per månad | Begränsad till infra kapacitet | ~3,000,000 | +| Kostnad per fråga | $0 | $0.00004 | +| Infrastructure | Centraliserad | Decentraliserad | +| Kostnader för ingenjörsarbete | $200 per timme | Inkluderat | +| Geografisk redundans | $1,200 i totala kostnader per extra nod | Inkluderat | +| Drifttid | Varierande | 99.9%+ | +| Total Månadskostnad | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Kostnadsjämförelse | Egen Värd | The Graph Nätverk | -| :-: | :-: | :-: | -| Månatlig kostnad för server\* | $1100 per månad, per nod | $0 | -| Kostnad för frågor | $4000 | $1,200 per month | -| Antal noder som behövs | 10 | Ej tillämpligt | -| Konstruktionstid | $6,000 eller mer per månad | Ingen, inbyggd i nätverket med globalt distribuerade Indexers | -| Frågor per månad | Begränsad till infra kapacitet | ~30,000,000 | -| Kostnad per fråga | $0 | $0.00004 | -| Infrastructure | Centraliserad | Decentraliserad | -| Geografisk redundans | $1,200 i totala kostnader per extra nod | Inkluderat | -| Drifttid | Varierande | 99.9%+ | -| Total Månadskostnad | $11,000+ | $1,200 | +| Kostnadsjämförelse | Egen Värd | The Graph Nätverk | +| :---------------------------: | :-----------------------------------------: | :-----------------------------------------------------------: | +| Månatlig kostnad för server\* | $1100 per månad, per nod | $0 | +| Kostnad för frågor | $4000 | $1,200 per month | +| Antal noder som behövs | 10 | Ej tillämpligt | +| Konstruktionstid | $6,000 eller mer per månad | Ingen, inbyggd i nätverket med globalt distribuerade Indexers | +| Frågor per månad | Begränsad till infra kapacitet | ~30,000,000 | +| Kostnad per fråga | $0 | $0.00004 | +| Infrastructure | Centraliserad | Decentraliserad | +| Geografisk redundans | $1,200 i totala kostnader per extra nod | Inkluderat | +| Drifttid | Varierande | 99.9%+ | +| Total Månadskostnad | $11,000+ | $1,200 | \*inklusive kostnader för backup: $50-$100 per månad @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. 
Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -Att kurera signal på en subgraf är en valfri engångskostnad med noll nettokostnad (t.ex. $1k i signal kan kurera på en subgraf och senare dras tillbaka - med potential att tjäna avkastning i processen). +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## No Setup Costs & Greater Operational Efficiency @@ -89,4 +89,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From 956a109d58a5a88daf4ac7e6e0ca591b32ae9cb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:35 -0500 Subject: [PATCH 0464/1789] New translations benefits.mdx (Turkish) --- website/src/pages/tr/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/tr/resources/benefits.mdx b/website/src/pages/tr/resources/benefits.mdx index cb9b6e71d129..d18674c95147 100644 --- a/website/src/pages/tr/resources/benefits.mdx +++ b/website/src/pages/tr/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Cost Comparison | Self Hosted | Graph Ağı | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | $0 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | -| Cost per query | $0 | $0 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | $0 | +| Cost Comparison | Self Hosted | Graph Ağı | +| :--------------------------: | :-------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $0+ | $0 per month | +| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | +| Cost per query | $0 | $0 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $750+ per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Cost Comparison | Self Hosted | Graph Ağı | -| :-: | :-: | :-: | -| Monthly server 
cost\* | $350 per month | $0 | -| Query costs | $500 per month | $120 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~3,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $120 | +| Cost Comparison | Self Hosted | Graph Ağı | +| :--------------------------: | :----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $500 per month | $120 per month | +| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~3,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Engineering expense | $200 per hour | Included | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Cost Comparison | Self Hosted | Graph Ağı | -| :-: | :-: | :-: | -| Monthly server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $1,200 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~30,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $11,000+ | $1,200 | +| Cost Comparison | Self Hosted | Graph Ağı | +| :--------------------------: | :-----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $1100 per month, per node | $0 | +| Query costs | $4000 | $1,200 per month | +| Number of nodes needed | 10 | Not applicable | +| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~30,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $11,000+ | $1,200 | \*including costs for backup: $50-$100 per month @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. 
Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -Curating signal on a subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a subgraph, and later withdrawn—with potential to earn returns in the process). +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## No Setup Costs & Greater Operational Efficiency @@ -89,4 +89,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From df8bf661ab20af5490e30eb73e736f0a37a10942 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:36 -0500 Subject: [PATCH 0465/1789] New translations benefits.mdx (Ukrainian) --- website/src/pages/uk/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/uk/resources/benefits.mdx b/website/src/pages/uk/resources/benefits.mdx index e433c11c5903..230691184e2b 100644 --- a/website/src/pages/uk/resources/benefits.mdx +++ b/website/src/pages/uk/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Порівняння вартості послуг | Самостійний хостинг | Graph мережа | -| :-: | :-: | :-: | -| Щомісячна плата за сервер\* | $350 на місяць | $0 | -| Вартість запитів | $0+ | $0 per month | -| Час технічного обслуговування | $400 на місяць | Немає, вбудовані в мережу з глобально розподіленими індексаторами | -| Кількість запитів за місяць | Обмежується інфраструктурними можливостями | 100,000 (Free Plan) | -| Вартість одного запиту | $0 | $0 | -| Infrastructure | Централізована | Децентралізована | -| Географічне резервування | $750+ за кожну додаткову ноду | Включено | -| Час безвідмовної роботи | Варіюється | 99.9%+ | -| Загальна сума щомісячних витрат | $750+ | $0 | +| Порівняння вартості послуг | Самостійний хостинг | Graph мережа | +| :---------------------------------------: | :----------------------------------------: | :---------------------------------------------------------------: | +| Щомісячна плата за сервер\* | $350 на місяць | $0 | +| Вартість запитів | $0+ | $0 per month | +| Час технічного обслуговування | $400 на місяць | Немає, вбудовані в мережу з глобально розподіленими індексаторами | +| Кількість запитів за місяць | Обмежується інфраструктурними можливостями | 100,000 (Free Plan) | +| Вартість одного запиту | $0 | $0 | +| Infrastructure | Централізована | Децентралізована | +| Географічне резервування | $750+ за кожну додаткову ноду | Включено | +| Час безвідмовної роботи | Варіюється | 99.9%+ | +| Загальна сума щомісячних витрат | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Порівняння вартості послуг | Самостійний хостинг | Graph мережа | -| :-: | :-: 
| :-: | -| Щомісячна плата за сервер\* | $350 на місяць | $0 | -| Вартість запитів | $500 на місяць | $120 per month | -| Час технічного обслуговування | $800 на місяць | Немає, вбудовані в мережу з глобально розподіленими індексаторами | -| Кількість запитів за місяць | Обмежується інфраструктурними можливостями | ~3,000,000 | -| Вартість одного запиту | $0 | $0.00004 | -| Infrastructure | Централізована | Децентралізована | -| Інженерно-технічні витрати | $200 на годину | Включено | -| Географічне резервування | $1,200 загальних витрат на кожну додаткову ноду | Включено | -| Час безвідмовної роботи | Варіюється | 99.9%+ | -| Загальна сума щомісячних витрат | $1,650+ | $120 | +| Порівняння вартості послуг | Самостійний хостинг | Graph мережа | +| :---------------------------------------: | :---------------------------------------------: | :---------------------------------------------------------------: | +| Щомісячна плата за сервер\* | $350 на місяць | $0 | +| Вартість запитів | $500 на місяць | $120 per month | +| Час технічного обслуговування | $800 на місяць | Немає, вбудовані в мережу з глобально розподіленими індексаторами | +| Кількість запитів за місяць | Обмежується інфраструктурними можливостями | ~3,000,000 | +| Вартість одного запиту | $0 | $0.00004 | +| Infrastructure | Централізована | Децентралізована | +| Інженерно-технічні витрати | $200 на годину | Включено | +| Географічне резервування | $1,200 загальних витрат на кожну додаткову ноду | Включено | +| Час безвідмовної роботи | Варіюється | 99.9%+ | +| Загальна сума щомісячних витрат | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Порівняння вартості послуг | Самостійний хостинг | Graph мережа | -| :-: | :-: | :-: | -| Щомісячна плата за сервер\* | $1100 на місяць, за одну ноду | $0 | -| Вартість запитів | $4000 | $1,200 per month | -| Кількість необхідних нод | 10 | Не стосується | -| Час технічного обслуговування | $6,000 і більше на місяць | Немає, вбудовані в мережу з глобально розподіленими індексаторами | -| Кількість запитів за місяць | Обмежується інфраструктурними можливостями | ~30,000,000 | -| Вартість одного запиту | $0 | $0.00004 | -| Infrastructure | Централізована | Децентралізована | -| Географічне резервування | $1,200 загальних витрат на кожну додаткову ноду | Включено | -| Час безвідмовної роботи | Варіюється | 99.9%+ | -| Загальна сума щомісячних витрат | $11,000+ | $1,200 | +| Порівняння вартості послуг | Самостійний хостинг | Graph мережа | +| :---------------------------------------: | :---------------------------------------------: | :---------------------------------------------------------------: | +| Щомісячна плата за сервер\* | $1100 на місяць, за одну ноду | $0 | +| Вартість запитів | $4000 | $1,200 per month | +| Кількість необхідних нод | 10 | Не стосується | +| Час технічного обслуговування | $6,000 і більше на місяць | Немає, вбудовані в мережу з глобально розподіленими індексаторами | +| Кількість запитів за місяць | Обмежується інфраструктурними можливостями | ~30,000,000 | +| Вартість одного запиту | $0 | $0.00004 | +| Infrastructure | Централізована | Децентралізована | +| Географічне резервування | $1,200 загальних витрат на кожну додаткову ноду | Включено | +| Час безвідмовної роботи | Варіюється | 99.9%+ | +| Загальна сума щомісячних витрат | $11,000+ | $1,200 | \*включаючи витрати на резервне копіювання: $50-$100 на місяць @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. 
Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -Кураторство сигналу на підграфі є опціональною одноразовою послугою з нульовою вартістю (наприклад, сигнал на суму 1 тис. доларів можна розмістити на підграфі, а потім вивести — з можливістю отримання прибутку в цьому процесі). +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## No Setup Costs & Greater Operational Efficiency @@ -89,4 +89,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). 
From 3e2d64272b75ecd3cbd411768cec3a20788d5070 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:37 -0500 Subject: [PATCH 0466/1789] New translations benefits.mdx (Chinese Simplified) --- website/src/pages/zh/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/zh/resources/benefits.mdx b/website/src/pages/zh/resources/benefits.mdx index dc6336e1893a..1eb179aefcb9 100644 --- a/website/src/pages/zh/resources/benefits.mdx +++ b/website/src/pages/zh/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| 成本比较 | 自托管 | Graph网络 | -| :------------------: | :-------------------------------------: | :----------------------------------------: | -| 每月服务器费用 \* | 每月350美元 | 0美元 | -| 查询成本 | $0+ | $0 per month | -| 工程时间 | 400美元每月 | 没有,内置在具有全球去中心化索引者的网络中 | -| 每月查询 | 受限于基础设施能力 | 100,000 (Free Plan) | -| 每个查询的成本 | 0美元 | $0 | -| Infrastructure | 中心化 | 去中心化 | -| 异地备援 | 每个额外节点 $750 + | 包括在内 | -| 正常工作时间 | 变量 | 99.9%+ | -| 每月总成本 | $750+ | 0美元 | +| 成本比较 | 自托管 | Graph网络 | +| :--------------: | :-------------------------------------: | :-------------------: | +| 每月服务器费用 \* | 每月350美元 | 0美元 | +| 查询成本 | $0+ | $0 per month | +| 工程时间 | 400美元每月 | 没有,内置在具有全球去中心化索引者的网络中 | +| 每月查询 | 受限于基础设施能力 | 100,000 (Free Plan) | +| 每个查询的成本 | 0美元 | $0 | +| Infrastructure | 中心化 | 去中心化 | +| 异地备援 | 每个额外节点 $750 + | 包括在内 | +| 正常工作时间 | 变量 | 99.9%+ | +| 每月总成本 | $750+ | 0美元 | ## Medium Volume User (~3M queries per month) -| 成本比较 | 自托管 | Graph网络 | -| :------------------: | :----------------------------------------: | :----------------------------------------: | -| 每月服务器费用 \* | 每月350美元 | 0美元 | -| 查询成本 | 每月500美元 | $120 per month | -| 工程时间 | 每月800美元 | 没有,内置在具有全球去中心化索引者的网络中 | -| 每月查询 | 受限于基础设施能力 | ~3,000,000 | -| 每个查询的成本 | 0美元 | $0.00004 | -| Infrastructure | 中心化 | 去中心化 | -| 工程费用 | 每小时200美元 | 包括在内 | -| 异地备援 | 每个额外节点的总成本为1200美元 | 包括在内 | -| 正常工作时间 | 变量 | 99.9%+ | -| 每月总成本 | $1,650+ | $120 | +| 成本比较 | 自托管 | Graph网络 | +| :--------------: | :----------------------------------------: | :-------------------: | +| 每月服务器费用 \* | 每月350美元 | 0美元 | +| 查询成本 | 每月500美元 | $120 per month | +| 工程时间 | 每月800美元 | 没有,内置在具有全球去中心化索引者的网络中 | +| 每月查询 | 受限于基础设施能力 | ~3,000,000 | +| 每个查询的成本 | 0美元 | $0.00004 | +| Infrastructure | 中心化 | 去中心化 | +| 工程费用 | 每小时200美元 | 包括在内 | +| 异地备援 | 每个额外节点的总成本为1200美元 | 包括在内 | +| 正常工作时间 | 变量 | 99.9%+ | +| 每月总成本 | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| 成本比较 | 自托管 | Graph网络 | -| :------------------: | :-----------------------------------------: | :----------------------------------------: | -| 每月服务器费用 \* | 1100美元每月每节点 | 0美元 | -| 查询成本 | 4000美元 | $1,200 per month | -| 需要的节点数量 | 10 | 不适用 | -| 工程时间 | 每月6000美元或以上 | 没有,内置在具有全球去中心化索引者的网络中 | -| 每月查询 | 受限于基础设施能力 | ~30,000,000 | -| 每个查询的成本 | 0美元 | $0.00004 | -| Infrastructure | 中心化 | 去中心化 | -| 异地备援 | 每个额外节点的总成本为1200美元 | 包括在内 | -| 正常工作时间 | 变量 | 99.9%+ | -| 每月总成本 | $11,000+ | $1,200 | +| 成本比较 | 自托管 | Graph网络 | +| :--------------: | :-----------------------------------------: | :-------------------: | +| 每月服务器费用 \* | 1100美元每月每节点 | 0美元 | +| 查询成本 | 4000美元 | $1,200 per month | +| 需要的节点数量 | 10 | 不适用 | +| 工程时间 | 每月6000美元或以上 | 没有,内置在具有全球去中心化索引者的网络中 | +| 每月查询 | 受限于基础设施能力 | ~30,000,000 | +| 每个查询的成本 | 0美元 | $0.00004 | +| Infrastructure | 中心化 | 去中心化 | +| 异地备援 | 每个额外节点的总成本为1200美元 | 包括在内 | +| 正常工作时间 | 变量 | 99.9%+ | +| 每月总成本 | $11,000+ | $1,200 | - 包括后备费用: 
每月$50-$100美元 @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -在一个子图上策划信号是一个可选一次性净零成本(例如,1千美元的信号可以在一个子图上管理,然后撤回ーー在这个过程中有可能获得回报)。 +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## No Setup Costs & Greater Operational Efficiency @@ -89,4 +89,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). 
From 689c27040f528ccfbff33926556fade92fd30671 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:38 -0500 Subject: [PATCH 0467/1789] New translations benefits.mdx (Urdu (Pakistan)) --- website/src/pages/ur/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/ur/resources/benefits.mdx b/website/src/pages/ur/resources/benefits.mdx index 341a8c0a4c31..ff3e9e031fcf 100644 --- a/website/src/pages/ur/resources/benefits.mdx +++ b/website/src/pages/ur/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| اخراجات کا موازنہ | خود میزبان | The Graph Network | -| :-: | :-: | :-: | -| ماہانہ سرور کی قیمت/\* | $350 فی مہینہ | $0 | -| استفسار کے اخراجات | $0+ | $0 per month | -| انجینئرنگ کا وقت | $400 فی مہینہ | کوئی بھی نہیں، عالمی سطح پر تقسیم شدہ انڈیکسرز کے ساتھ نیٹ ورک میں بنایا گیا ہے | -| فی مہینہ سوالات | بنیادی صلاحیتوں تک محدود | 100,000 (Free Plan) | -| قیمت فی سوال | $0 | $0 | -| Infrastructure | سینٹرلائزڈ | ڈیسینٹرلائزڈ | -| جغرافیائی فالتو پن | $750+ فی اضافی نوڈ | شامل | -| اپ ٹائم | اتار چڑھاو | 99.9%+ | -| کل ماہانہ اخراجات | $750+ | $0 | +| اخراجات کا موازنہ | خود میزبان | The Graph Network | +| :--------------------------: | :-------------------------------------: | :-----------------------------------------------------------------------------: | +| ماہانہ سرور کی قیمت/\* | $350 فی مہینہ | $0 | +| استفسار کے اخراجات | $0+ | $0 per month | +| انجینئرنگ کا وقت | $400 فی مہینہ | کوئی بھی نہیں، عالمی سطح پر تقسیم شدہ انڈیکسرز کے ساتھ نیٹ ورک میں بنایا گیا ہے | +| فی مہینہ سوالات | بنیادی صلاحیتوں تک محدود | 100,000 (Free Plan) | +| قیمت فی سوال | $0 | $0 | +| Infrastructure | سینٹرلائزڈ | ڈیسینٹرلائزڈ | +| جغرافیائی فالتو پن | $750+ فی اضافی نوڈ | شامل | +| اپ ٹائم | اتار چڑھاو | 99.9%+ | +| کل ماہانہ اخراجات | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| اخراجات کا موازنہ | خود میزبان | The Graph Network | -| :-: | :-: | :-: | -| ماہانہ سرور کی قیمت/\* | $350 فی مہینہ | $0 | -| استفسار کے اخراجات | $500 فی مہینہ | $120 per month | -| انجینئرنگ کا وقت | $800 فی مہینہ | کوئی بھی نہیں، عالمی سطح پر تقسیم شدہ انڈیکسرز کے ساتھ نیٹ ورک میں بنایا گیا ہے | -| فی مہینہ سوالات | بنیادی صلاحیتوں تک محدود | ~3,000,000 | -| قیمت فی سوال | $0 | $0.00004 | -| Infrastructure | سینٹرلائزڈ | ڈیسینٹرلائزڈ | -| انجینئرنگ کے اخراجات | $200 فی گھنٹہ | شامل | -| جغرافیائی فالتو پن | فی اضافی نوڈ کل اخراجات میں $1,200 | شامل | -| اپ ٹائم | اتار چڑھاو | 99.9%+ | -| کل ماہانہ اخراجات | $1,650+ | $120 | +| اخراجات کا موازنہ | خود میزبان | The Graph Network | +| :--------------------------: | :----------------------------------------: | :-----------------------------------------------------------------------------: | +| ماہانہ سرور کی قیمت/\* | $350 فی مہینہ | $0 | +| استفسار کے اخراجات | $500 فی مہینہ | $120 per month | +| انجینئرنگ کا وقت | $800 فی مہینہ | کوئی بھی نہیں، عالمی سطح پر تقسیم شدہ انڈیکسرز کے ساتھ نیٹ ورک میں بنایا گیا ہے | +| فی مہینہ سوالات | بنیادی صلاحیتوں تک محدود | ~3,000,000 | +| قیمت فی سوال | $0 | $0.00004 | +| Infrastructure | سینٹرلائزڈ | ڈیسینٹرلائزڈ | +| انجینئرنگ کے اخراجات | $200 فی گھنٹہ | شامل | +| جغرافیائی فالتو پن | فی اضافی نوڈ کل اخراجات میں $1,200 | شامل | +| اپ ٹائم | اتار چڑھاو | 99.9%+ | +| کل ماہانہ اخراجات | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| اخراجات کا موازنہ | خود میزبان | The 
Graph Network | -| :-: | :-: | :-: | -| ماہانہ سرور کی قیمت/\* | $1100 فی مہینہ، فی نوڈ | $0 | -| استفسار کے اخراجات | $4000 | $1,200 per month | -| نوڈس کی تعداد درکار ہے | 10 | قابل اطلاق نہیں | -| انجینئرنگ کا وقت | $6,000 یا اس سے زیادہ فی مہینہ | کوئی بھی نہیں، عالمی سطح پر تقسیم شدہ انڈیکسرز کے ساتھ نیٹ ورک میں بنایا گیا ہے | -| فی مہینہ سوالات | بنیادی صلاحیتوں تک محدود | ~30,000,000 | -| قیمت فی سوال | $0 | $0.00004 | -| Infrastructure | سینٹرلائزڈ | ڈیسینٹرلائزڈ | -| جغرافیائی فالتو پن | فی اضافی نوڈ کل اخراجات میں $1,200 | شامل | -| اپ ٹائم | اتار چڑھاو | 99.9%+ | -| کل ماہانہ اخراجات | $11,000+ | $1,200 | +| اخراجات کا موازنہ | خود میزبان | The Graph Network | +| :--------------------------: | :-----------------------------------------: | :-----------------------------------------------------------------------------: | +| ماہانہ سرور کی قیمت/\* | $1100 فی مہینہ، فی نوڈ | $0 | +| استفسار کے اخراجات | $4000 | $1,200 per month | +| نوڈس کی تعداد درکار ہے | 10 | قابل اطلاق نہیں | +| انجینئرنگ کا وقت | $6,000 یا اس سے زیادہ فی مہینہ | کوئی بھی نہیں، عالمی سطح پر تقسیم شدہ انڈیکسرز کے ساتھ نیٹ ورک میں بنایا گیا ہے | +| فی مہینہ سوالات | بنیادی صلاحیتوں تک محدود | ~30,000,000 | +| قیمت فی سوال | $0 | $0.00004 | +| Infrastructure | سینٹرلائزڈ | ڈیسینٹرلائزڈ | +| جغرافیائی فالتو پن | فی اضافی نوڈ کل اخراجات میں $1,200 | شامل | +| اپ ٹائم | اتار چڑھاو | 99.9%+ | +| کل ماہانہ اخراجات | $11,000+ | $1,200 | /\*بیک اپ کے اخراجات سمیت: $50-$100 فی مہینہ @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -سب گراف پر کیوریٹنگ سگنل ایک اختیاری ایک بار، خالص صفر لاگت ہے (مثال کے طور پر، $1k سگنل کو سب گراف پر کیوریٹ کیا جا سکتا ہے، اور بعد میں واپس لیا جا سکتا ہے—اس عمل میں منافع کمانے کی صلاحیت کے ساتھ). +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## No Setup Costs & Greater Operational Efficiency @@ -89,4 +89,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). 
From 1fdcdefa2c6a6c2378250d8a1e8875f36091613f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:39 -0500 Subject: [PATCH 0468/1789] New translations benefits.mdx (Vietnamese) --- website/src/pages/vi/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/vi/resources/benefits.mdx b/website/src/pages/vi/resources/benefits.mdx index fa0a84626503..9898d440a9b4 100644 --- a/website/src/pages/vi/resources/benefits.mdx +++ b/website/src/pages/vi/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Cost Comparison | Self Hosted | Mạng The Graph | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | $0 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | -| Cost per query | $0 | $0 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | $0 | +| Cost Comparison | Self Hosted | Mạng The Graph | +| :--------------------------: | :-------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $0+ | $0 per month | +| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | +| Cost per query | $0 | $0 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $750+ per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Cost Comparison | Self Hosted | Mạng The Graph | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $120 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~3,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $120 | +| Cost Comparison | Self Hosted | Mạng The Graph | +| :--------------------------: | :----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $500 per month | $120 per month | +| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~3,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Engineering expense | $200 per hour | Included | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Cost Comparison | Self Hosted | Mạng The Graph | -| :-: | :-: | :-: | -| Monthly 
server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $1,200 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~30,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $11,000+ | $1,200 | +| Cost Comparison | Self Hosted | Mạng The Graph | +| :--------------------------: | :-----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $1100 per month, per node | $0 | +| Query costs | $4000 | $1,200 per month | +| Number of nodes needed | 10 | Not applicable | +| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~30,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $11,000+ | $1,200 | \*including costs for backup: $50-$100 per month @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -Curating signal on a subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a subgraph, and later withdrawn—with potential to earn returns in the process). +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## No Setup Costs & Greater Operational Efficiency @@ -89,4 +89,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). 
From 758f1a1c5d892fd68f44d359eb2d8d99ddd7e53a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:40 -0500 Subject: [PATCH 0469/1789] New translations benefits.mdx (Marathi) --- website/src/pages/mr/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/mr/resources/benefits.mdx b/website/src/pages/mr/resources/benefits.mdx index 4ffee4b07761..128743e2c9ff 100644 --- a/website/src/pages/mr/resources/benefits.mdx +++ b/website/src/pages/mr/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| खर्चाची तुलना | स्वत: होस्ट केलेले | आलेख नेटवर्क | -| :-: | :-: | :-: | -| मासिक सर्व्हर खर्च\* | दरमहा $350 | $0 | -| क्वेरी खर्च | $0+ | $0 per month | -| अभियांत्रिकी वेळ | दरमहा $400 | काहीही नाही, जागतिक स्तरावर वितरित इंडेक्सर्ससह नेटवर्कमध्ये तयार केलेले | -| प्रति महिना प्रश्न | इन्फ्रा क्षमतांपुरती मर्यादित | 100,000 (Free Plan) | -| प्रति क्वेरी खर्च | $0 | $0 | -| Infrastructure | केंद्रीकृत | विकेंद्रित | -| भौगोलिक रिडंडंसी | प्रति अतिरिक्त नोड $750+ | समाविष्ट | -| अपटाइम | बदलते | 99.9%+ | -| एकूण मासिक खर्च | $750+ | $0 | +| खर्चाची तुलना | स्वत: होस्ट केलेले | आलेख नेटवर्क | +| :--------------------------: | :-------------------------------------: | :----------------------------------------------------------------------: | +| मासिक सर्व्हर खर्च\* | दरमहा $350 | $0 | +| क्वेरी खर्च | $0+ | $0 per month | +| अभियांत्रिकी वेळ | दरमहा $400 | काहीही नाही, जागतिक स्तरावर वितरित इंडेक्सर्ससह नेटवर्कमध्ये तयार केलेले | +| प्रति महिना प्रश्न | इन्फ्रा क्षमतांपुरती मर्यादित | 100,000 (Free Plan) | +| प्रति क्वेरी खर्च | $0 | $0 | +| Infrastructure | केंद्रीकृत | विकेंद्रित | +| भौगोलिक रिडंडंसी | प्रति अतिरिक्त नोड $750+ | समाविष्ट | +| अपटाइम | बदलते | 99.9%+ | +| एकूण मासिक खर्च | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| खर्चाची तुलना | स्वत: होस्ट केलेले | आलेख नेटवर्क | -| :-: | :-: | :-: | -| मासिक सर्व्हर खर्च\* | दरमहा $350 | $0 | -| क्वेरी खर्च | दरमहा $500 | $120 per month | -| अभियांत्रिकी वेळ | दरमहा $800 | काहीही नाही, जागतिक स्तरावर वितरित इंडेक्सर्ससह नेटवर्कमध्ये तयार केलेले | -| प्रति महिना प्रश्न | इन्फ्रा क्षमतांपुरती मर्यादित | ~3,000,000 | -| प्रति क्वेरी खर्च | $0 | $0.00004 | -| Infrastructure | केंद्रीकृत | विकेंद्रित | -| अभियांत्रिकी खर्च | $200 प्रति तास | समाविष्ट | -| भौगोलिक रिडंडंसी | प्रति अतिरिक्त नोड एकूण खर्चात $1,200 | समाविष्ट | -| अपटाइम | बदलते | 99.9%+ | -| एकूण मासिक खर्च | $1,650+ | $120 | +| खर्चाची तुलना | स्वत: होस्ट केलेले | आलेख नेटवर्क | +| :--------------------------: | :----------------------------------------: | :----------------------------------------------------------------------: | +| मासिक सर्व्हर खर्च\* | दरमहा $350 | $0 | +| क्वेरी खर्च | दरमहा $500 | $120 per month | +| अभियांत्रिकी वेळ | दरमहा $800 | काहीही नाही, जागतिक स्तरावर वितरित इंडेक्सर्ससह नेटवर्कमध्ये तयार केलेले | +| प्रति महिना प्रश्न | इन्फ्रा क्षमतांपुरती मर्यादित | ~3,000,000 | +| प्रति क्वेरी खर्च | $0 | $0.00004 | +| Infrastructure | केंद्रीकृत | विकेंद्रित | +| अभियांत्रिकी खर्च | $200 प्रति तास | समाविष्ट | +| भौगोलिक रिडंडंसी | प्रति अतिरिक्त नोड एकूण खर्चात $1,200 | समाविष्ट | +| अपटाइम | बदलते | 99.9%+ | +| एकूण मासिक खर्च | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| खर्चाची तुलना | स्वत: होस्ट केलेले | आलेख नेटवर्क | -| :-: | :-: | :-: | -| मासिक सर्व्हर खर्च\* | प्रति नोड, प्रति 
महिना $1100 | $0 | -| क्वेरी खर्च | $4000 | $1,200 per month | -| आवश्यक नोड्सची संख्या | 10 | लागू नाही | -| अभियांत्रिकी वेळ | दरमहा $6,000 किंवा अधिक | काहीही नाही, जागतिक स्तरावर वितरित इंडेक्सर्ससह नेटवर्कमध्ये तयार केलेले | -| प्रति महिना प्रश्न | इन्फ्रा क्षमतांपुरती मर्यादित | ~30,000,000 | -| प्रति क्वेरी खर्च | $0 | $0.00004 | -| Infrastructure | केंद्रीकृत | विकेंद्रित | -| भौगोलिक रिडंडंसी | प्रति अतिरिक्त नोड एकूण खर्चात $1,200 | समाविष्ट | -| अपटाइम | बदलते | 99.9%+ | -| एकूण मासिक खर्च | $11,000+ | $1,200 | +| खर्चाची तुलना | स्वत: होस्ट केलेले | आलेख नेटवर्क | +| :--------------------------: | :-----------------------------------------: | :----------------------------------------------------------------------: | +| मासिक सर्व्हर खर्च\* | प्रति नोड, प्रति महिना $1100 | $0 | +| क्वेरी खर्च | $4000 | $1,200 per month | +| आवश्यक नोड्सची संख्या | 10 | लागू नाही | +| अभियांत्रिकी वेळ | दरमहा $6,000 किंवा अधिक | काहीही नाही, जागतिक स्तरावर वितरित इंडेक्सर्ससह नेटवर्कमध्ये तयार केलेले | +| प्रति महिना प्रश्न | इन्फ्रा क्षमतांपुरती मर्यादित | ~30,000,000 | +| प्रति क्वेरी खर्च | $0 | $0.00004 | +| Infrastructure | केंद्रीकृत | विकेंद्रित | +| भौगोलिक रिडंडंसी | प्रति अतिरिक्त नोड एकूण खर्चात $1,200 | समाविष्ट | +| अपटाइम | बदलते | 99.9%+ | +| एकूण मासिक खर्च | $11,000+ | $1,200 | \*बॅकअपच्या खर्चासह: $50-$100 प्रति महिना @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -सबग्राफवर क्युरेटिंग सिग्नल हा पर्यायी एक-वेळचा, निव्वळ-शून्य खर्च आहे (उदा., $1k सिग्नल सबग्राफवर क्युरेट केला जाऊ शकतो आणि नंतर मागे घेतला जाऊ शकतो—प्रक्रियेत परतावा मिळविण्याच्या संभाव्यतेसह). +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## No Setup Costs & Greater Operational Efficiency @@ -89,4 +89,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). 
From 192e4ad11b510634f3388117e051b8303cb383e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:42 -0500 Subject: [PATCH 0470/1789] New translations benefits.mdx (Hindi) --- website/src/pages/hi/resources/benefits.mdx | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/hi/resources/benefits.mdx b/website/src/pages/hi/resources/benefits.mdx index cb043820d821..0cf5544b6f68 100644 --- a/website/src/pages/hi/resources/benefits.mdx +++ b/website/src/pages/hi/resources/benefits.mdx @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| लागत तुलना | स्वयं होस्ट किया गया | The Graph Network | -| :-: | :-: | :-: | -| मासिक सर्वर लागत\* | $350 प्रति माह | $0 | -| पूछताछ लागत | $0+ | $0 per month | -| इंजीनियरिंग का समय | $ 400 प्रति माह | कोई नहीं, विश्व स्तर पर वितरित इंडेक्सर्स के साथ नेटवर्क में बनाया गया | -| प्रति माह प्रश्न | इन्फ्रा क्षमताओं तक सीमित | 100,000 (Free Plan) | -| लागत प्रति क्वेरी | $0 | $0 | -| Infrastructure | केंद्रीकृत | विकेन्द्रीकृत | -| भौगोलिक अतिरेक | $750+ प्रति अतिरिक्त नोड | शामिल | -| अपटाइम | भिन्न | 99.9%+ | -| कुल मासिक लागत | $750+ | $0 | +| लागत तुलना | स्वयं होस्ट किया गया | The Graph Network | +| :----------------------------: | :-------------------------------------: | :--------------------------------------------------------------------: | +| मासिक सर्वर लागत\* | $350 प्रति माह | $0 | +| पूछताछ लागत | $0+ | $0 per month | +| इंजीनियरिंग का समय | $ 400 प्रति माह | कोई नहीं, विश्व स्तर पर वितरित इंडेक्सर्स के साथ नेटवर्क में बनाया गया | +| प्रति माह प्रश्न | इन्फ्रा क्षमताओं तक सीमित | 100,000 (Free Plan) | +| लागत प्रति क्वेरी | $0 | $0 | +| Infrastructure | केंद्रीकृत | विकेन्द्रीकृत | +| भौगोलिक अतिरेक | $750+ प्रति अतिरिक्त नोड | शामिल | +| अपटाइम | भिन्न | 99.9%+ | +| कुल मासिक लागत | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| लागत तुलना | स्वयं होस्ट किया गया | The Graph Network | -| :-: | :-: | :-: | -| मासिक सर्वर लागत\* | $350 प्रति माह | $0 | -| पूछताछ लागत | $ 500 प्रति माह | $120 per month | -| इंजीनियरिंग का समय | $800 प्रति माह | कोई नहीं, विश्व स्तर पर वितरित इंडेक्सर्स के साथ नेटवर्क में बनाया गया | -| प्रति माह प्रश्न | इन्फ्रा क्षमताओं तक सीमित | ~3,000,000 | -| लागत प्रति क्वेरी | $0 | $0.00004 | -| Infrastructure | केंद्रीकृत | विकेन्द्रीकृत | -| इंजीनियरिंग खर्च | $ 200 प्रति घंटा | शामिल | -| भौगोलिक अतिरेक | प्रति अतिरिक्त नोड कुल लागत में $1,200 | शामिल | -| अपटाइम | भिन्न | 99.9%+ | -| कुल मासिक लागत | $1,650+ | $120 | +| लागत तुलना | स्वयं होस्ट किया गया | The Graph Network | +| :----------------------------: | :----------------------------------------: | :--------------------------------------------------------------------: | +| मासिक सर्वर लागत\* | $350 प्रति माह | $0 | +| पूछताछ लागत | $ 500 प्रति माह | $120 per month | +| इंजीनियरिंग का समय | $800 प्रति माह | कोई नहीं, विश्व स्तर पर वितरित इंडेक्सर्स के साथ नेटवर्क में बनाया गया | +| प्रति माह प्रश्न | इन्फ्रा क्षमताओं तक सीमित | ~3,000,000 | +| लागत प्रति क्वेरी | $0 | $0.00004 | +| Infrastructure | केंद्रीकृत | विकेन्द्रीकृत | +| इंजीनियरिंग खर्च | $ 200 प्रति घंटा | शामिल | +| भौगोलिक अतिरेक | प्रति अतिरिक्त नोड कुल लागत में $1,200 | शामिल | +| अपटाइम | भिन्न | 99.9%+ | +| कुल मासिक लागत | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| लागत तुलना | स्वयं होस्ट किया गया | The Graph Network | -| :-: | :-: | :-: | -| मासिक सर्वर लागत\* | 
$1100 प्रति माह, प्रति नोड | $0 | -| पूछताछ लागत | $4000 | $1,200 per month | -| आवश्यक नोड्स की संख्या | 10 | Not applicable | -| इंजीनियरिंग का समय | $6,000 or more per month | कोई नहीं, विश्व स्तर पर वितरित इंडेक्सर्स के साथ नेटवर्क में बनाया गया | -| प्रति माह प्रश्न | इन्फ्रा क्षमताओं तक सीमित | ~30,000,000 | -| लागत प्रति क्वेरी | $0 | $0.00004 | -| Infrastructure | केंद्रीकृत | विकेन्द्रीकृत | -| भौगोलिक अतिरेक | प्रति अतिरिक्त नोड कुल लागत में $1,200 | शामिल | -| अपटाइम | भिन्न | 99.9%+ | -| कुल मासिक लागत | $11,000+ | $1,200 | +| लागत तुलना | स्वयं होस्ट किया गया | The Graph Network | +| :----------------------------: | :-----------------------------------------: | :--------------------------------------------------------------------: | +| मासिक सर्वर लागत\* | $1100 प्रति माह, प्रति नोड | $0 | +| पूछताछ लागत | $4000 | $1,200 per month | +| आवश्यक नोड्स की संख्या | 10 | Not applicable | +| इंजीनियरिंग का समय | $6,000 or more per month | कोई नहीं, विश्व स्तर पर वितरित इंडेक्सर्स के साथ नेटवर्क में बनाया गया | +| प्रति माह प्रश्न | इन्फ्रा क्षमताओं तक सीमित | ~30,000,000 | +| लागत प्रति क्वेरी | $0 | $0.00004 | +| Infrastructure | केंद्रीकृत | विकेन्द्रीकृत | +| भौगोलिक अतिरेक | प्रति अतिरिक्त नोड कुल लागत में $1,200 | शामिल | +| अपटाइम | भिन्न | 99.9%+ | +| कुल मासिक लागत | $11,000+ | $1,200 | \*बैकअप की लागत सहित: $50-$100 प्रति माह @@ -75,9 +75,9 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -एस्टिमेटेड लागत केवल Ethereum Mainnet सबग्राफ़ के लिए है — अन्य नेटवर्कों पर `ग्राफ-नोड` को स्वयं होस्ट करने पर लागत और भी अधिक होती है। कुछ उपयोगकर्ताओं को अपने Subgraph को एक नई संस्करण में अपडेट करने की आवश्यकता हो सकती है। Ethereum गैस शुल्क के कारण, एक अपडेट की लागत लगभग ~$50 है जब लेख लिखा गया था। ध्यान दें कि [Arbitrum](/archived/arbitrum/arbitrum-faq/) पर गैस शुल्क Ethereum mainnet से काफी कम हैं। +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. -एक सबग्राफ पर क्यूरेटिंग सिग्नल एक वैकल्पिक वन-टाइम, नेट-जीरो कॉस्ट है (उदाहरण के लिए, सिग्नल में $1k को सबग्राफ पर क्यूरेट किया जा सकता है, और बाद में वापस ले लिया जाता है - प्रक्रिया में रिटर्न अर्जित करने की क्षमता के साथ)। +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). ## कोई सेटअप लागत नहीं और अधिक परिचालन दक्षता @@ -89,4 +89,4 @@ The Graph का विकेन्द्रीकृत नेटवर्क The Graph Network कम खर्चीला, उपयोग में आसान और बेहतर परिणाम प्रदान करता है, जब की graph-node को लोकल पर चलाने के मुकाबले। -आज ही The Graph Network का उपयोग शुरू करें, और सीखें कि कैसे [अपने subgraph को The Graph के विकेंद्रीकृत नेटवर्क पर प्रकाशित](/subgraphs/quick-start/) करें। +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). 
From 6523642f9c8f2b153d672b94a2d7c875037ce7b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:43 -0500 Subject: [PATCH 0471/1789] New translations benefits.mdx (Swahili) --- website/src/pages/sw/resources/benefits.mdx | 92 +++++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 website/src/pages/sw/resources/benefits.mdx diff --git a/website/src/pages/sw/resources/benefits.mdx b/website/src/pages/sw/resources/benefits.mdx new file mode 100644 index 000000000000..6899e348a912 --- /dev/null +++ b/website/src/pages/sw/resources/benefits.mdx @@ -0,0 +1,92 @@ +--- +title: The Graph vs. Self Hosting +socialImage: https://thegraph.com/docs/img/seo/benefits.jpg +--- + +The Graph’s decentralized network has been engineered and refined to create a robust indexing and querying experience—and it’s getting better every day thanks to thousands of contributors around the world. + +The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive. + +Here is an analysis: + +## Why You Should Use The Graph Network + +- Significantly lower monthly costs +- $0 infrastructure setup costs +- Superior uptime +- Access to hundreds of independent Indexers around the world +- 24/7 technical support by global community + +## The Benefits Explained + +### Lower & more Flexible Cost Structure + +No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $40 per million queries (~$0.00004 per query). Queries are priced in USD and paid in GRT or credit card. + +Query costs may vary; the quoted cost is the average at time of publication (March 2024). + +## Low Volume User (less than 100,000 queries per month) + +| Cost Comparison | Self Hosted | The Graph Network | +| :--------------------------: | :-------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $0+ | $0 per month | +| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | +| Cost per query | $0 | $0 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $750+ per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $750+ | $0 | + +## Medium Volume User (~3M queries per month) + +| Cost Comparison | Self Hosted | The Graph Network | +| :--------------------------: | :----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $500 per month | $120 per month | +| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~3,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Engineering expense | $200 per hour | Included | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $1,650+ | $120 | + +## High Volume User (~30M queries per month) + +| Cost Comparison | Self Hosted | The Graph Network | +| :--------------------------: | :-----------------------------------------: | 
:-------------------------------------------------------------: | +| Monthly server cost\* | $1100 per month, per node | $0 | +| Query costs | $4000 | $1,200 per month | +| Number of nodes needed | 10 | Not applicable | +| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~30,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $11,000+ | $1,200 | + +\*including costs for backup: $50-$100 per month + +Engineering time based on $200 per hour assumption + +Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. + +Estimated costs are only for Ethereum Mainnet Subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their Subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. + +Curating signal on a Subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a Subgraph, and later withdrawn—with potential to earn returns in the process). + +## No Setup Costs & Greater Operational Efficiency + +Zero setup fees. Get started immediately with no setup or overhead costs. No hardware requirements. No outages due to centralized infrastructure, and more time to concentrate on your core product . No need for backup servers, troubleshooting, or expensive engineering resources. + +## Reliability & Resiliency + +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. + +Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. + +Start using The Graph Network today, and learn how to [publish your Subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From bd53d9102252982129e55c7fd83933cef30dbadb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:44 -0500 Subject: [PATCH 0472/1789] New translations glossary.mdx (Romanian) --- website/src/pages/ro/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/ro/resources/glossary.mdx b/website/src/pages/ro/resources/glossary.mdx index ffcd4bca2eed..4c5ad55cd0d3 100644 --- a/website/src/pages/ro/resources/glossary.mdx +++ b/website/src/pages/ro/resources/glossary.mdx @@ -4,51 +4,51 @@ title: Glossary - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. 
+- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. -- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. 
In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs.

 - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned.

-- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph.
+- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph.

-- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned.
+- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned.

-- **Data Consumer**: Any application or user that queries a subgraph.
+- **Data Consumer**: Any application or user that queries a Subgraph.

-- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network.
+- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network.

-- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example.
+- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example.

 - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day.

-- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses:
+- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses:

- 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated.
+ 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated.

- 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data.
+ 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data.

-- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs.
+- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs.

 - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide.
@@ -56,28 +56,28 @@ title: Glossary
 - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned.

-- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT.
+- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT.

 - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT.

 - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network.

-- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer.
+- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer.

-- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer.
+- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer.

-- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations.
+- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations.

 - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way.

-- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol.
+- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol.

 - **Graph CLI**: A command line interface tool for building and deploying to The Graph.

 - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again.

-- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake.
+- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake.

-- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings.
+- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings.

-- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2).
+- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2).

From 7a8659c6bdcde16c9ef55cd77a4ee232c282b192 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Tue, 25 Feb 2025 17:16:45 -0500
Subject: [PATCH 0473/1789] New translations glossary.mdx (French)
---
 website/src/pages/fr/resources/glossary.mdx | 60 ++++++++++-----------
 1 file changed, 30 insertions(+), 30 deletions(-)

diff --git a/website/src/pages/fr/resources/glossary.mdx b/website/src/pages/fr/resources/glossary.mdx
index cfaa0beb4c78..f874e54e73cd 100644
--- a/website/src/pages/fr/resources/glossary.mdx
+++ b/website/src/pages/fr/resources/glossary.mdx
@@ -4,80 +4,80 @@ title: Glossaire
 - **The Graph** : Un protocole décentralisé pour l'indexation et l'interrogation des données.

-- **Query** : Une requête de données. Dans le cas de The Graph, une requête est une demande de données provenant d'un subgraph à laquelle répondra un Indexeur.
+- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer.

-- **GraphQL** : Un langage de requête pour les API et un moteur d'exécution pour répondre à ces requêtes avec vos données existantes.
The Graph utilise GraphQL pour interroger les subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint** : Une URL qui peut être utilisée pour interroger un subgraph. L'endpoint de test pour Subgraph Studio est `https://api.studio.thegraph.com/query///` et l'endpoint pour Graph Explorer est `https://gateway.thegraph.com/api//subgraphs/id/`. L'endpoint Graph Explorer est utilisé pour interroger les subgraphs sur le réseau décentralisé de The Graph. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. -- **Subgraph** : Une API ouverte qui extrait des données d'une blockchain, les traite et les stocke de manière à ce qu'elles puissent être facilement interrogées via GraphQL. Les développeurs peuvent créer, déployer et publier des subgraphs sur The Graph Network. Une fois indexé, le subgraph peut être interrogé par n'importe qui. +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexeur** : Participants au réseau qui gèrent des nœuds d'indexation pour indexer les données des blockchains et répondre aux requêtes GraphQL. - **Flux de revenus pour les Indexeurs** : Les Indexeurs sont récompensés en GRT par deux éléments : les remises sur les frais de requête et les récompenses pour l'indexation. - 1. **Remboursements de frais de requête** : Paiements effectués par les consommateurs de subgraphs pour avoir servi des requêtes sur le réseau. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Récompenses d'indexation** : Les récompenses que les Indexeurs reçoivent pour l'indexation des subgraphs. Les récompenses d'indexation sont générées par une nouvelle émission de 3 % de GRT par an. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake** : Le montant de GRT que les Indexeurs stakent pour participer au réseau décentralisé. Le minimum est de 100 000 GRT, et il n'y a pas de limite supérieure. - **Delegation Capacity** : C'est le montant maximum de GRT qu'un Indexeur peut accepter de la part des Déléguateurs. Les Indexeurs ne peuvent accepter que jusqu'à 16 fois leur propre Indexer Self-Stake, et toute délégation supplémentaire entraîne une dilution des récompenses. Par exemple, si un Indexeur a une Indexer Self-Stake de 1M GRT, sa capacité de délégation est de 16M. Cependant, les indexeurs peuvent augmenter leur capacité de délégation en augmentant leur Indexer Self-Stake. -- **Upgrade Indexer** : Un Indexeur conçu pour servir de solution de repli pour les requêtes de subgraphs qui ne sont pas traitées par d'autres Indexeurs sur le réseau. L'upgrade Indexer n'est pas compétitif par rapport aux autres Indexeurs. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. 
-- **Delegator**(Déléguateurs) : Participants au réseau qui possèdent des GRT et les délèguent à des Indexeurs. Cela permet aux Indexeurs d'augmenter leur participation dans les subgraphs du réseau. En retour, les Déléguateurs reçoivent une partie des récompenses d'indexation que les Indexeurs reçoivent pour le traitement des subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Taxe de délégation** : Une taxe de 0,5 % payée par les Déléguateurs lorsqu'ils délèguent des GRT aux Indexeurs. Les GRT utilisés pour payer la taxe sont brûlés. -- **Curator**(Curateur) : Participants au réseau qui identifient les subgraphs de haute qualité et signalent les GRT sur ces derniers en échange de parts de curation. Lorsque les Indexeurs réclament des frais de requête pour un subgraph, 10 % sont distribués aux Curateurs de ce subgraph. Il existe une corrélation positive entre la quantité de GRT signalée et le nombre d'Indexeurs indexant un subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Taxe de curation** : Une taxe de 1% payée par les Curateurs lorsqu'ils signalent des GRT sur des subgraphs. Les GRT utiliséa pour payer la taxe sont brûlés. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. -- **Consommateur de données** : Toute application ou utilisateur qui interroge un subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Développeur de subgraphs** : Un développeur qui construit et déploie un subgraph sur le réseau décentralisé de The Graph. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Manifeste du subgraph** : Un fichier YAML qui décrit le schéma GraphQL du subgraph, les sources de données et d'autres métadonnées. Vous trouverez [Ici](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) un exemple. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoque** : Unité de temps au sein du réseau. Actuellement, une époque correspond à 6 646 blocs, soit environ 1 jour. -- **Allocation** : Un Indexeur peut allouer l'ensemble de son staking de GRT (y compris le staking des Déléguateurs) à des subgraphs qui ont été publiés sur le réseau décentralisé de The Graph. Les allocations peuvent avoir différents statuts : +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Actif** : Une allocation est considérée comme active lorsqu'elle est créée onchain. C'est ce qu'on appelle ouvrir une allocation, et cela indique au réseau que l'Indexeur est en train d'indexer et de servir des requêtes pour un subgraph particulier. 
Les allocations actives accumulent des récompenses d'indexation proportionnelles au signal sur le subgraph et à la quantité de GRT allouée. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Fermé** : Un Indexeur peut réclamer les récompenses d'indexation accumulées sur un subgraph donné en soumettant une preuve d'indexation (POI) récente et valide. C'est ce qu'on appelle la fermeture d'une allocation. Une allocation doit avoir été ouverte pendant au moins une époque avant de pouvoir être fermée. La période d'allocation maximale est de 28 époques. Si un Indexeur laisse une allocation ouverte au-delà de 28 époques, il s'agit d'une allocation périmée. Lorsqu'une allocation est dans l'état **fermé**, un Fisherman peut encore ouvrir un litige pour contester un Indexeur pour avoir servi de fausses données. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio** : Une dapp puissante pour construire, déployer et publier des subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. -- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. +- **Fishermen** : Un rôle au sein de The Graph Network tenu par les participants qui surveillent l'exactitude et l'intégrité des données servies par les Indexeurs. Lorsqu'un Fisherman identifie une réponse à une requête ou un POI qu'il estime incorrect, il peut lancer un litige contre l'indexeur. Si le litige est tranché en faveur du Fisherman, l'indexeur perd 2,5 % de son staking. Sur ce montant, 50 % sont attribués au Fisherman à titre de récompense pour sa vigilance, et les 50 % restants sont retirés de la circulation (brûlés). Ce mécanisme est conçu pour encourager les pêcheurs à contribuer au maintien de la fiabilité du réseau en veillant à ce que les Indexeurs soient tenus responsables des données qu'ils fournissent. - **Arbitres** : Les arbitres sont des participants au réseau nommés dans le cadre d'un processus de gouvernance. Le rôle de l'arbitre est de décider de l'issue des litiges relatifs à l'indexation et aux requêtes. 
Leur objectif est de maximiser l'utilité et la fiabilité de The Graph. - **Slashing**(Taillade) : Les Indexeurs peuvent se voir retirer leur GRT pour avoir fourni un POI incorrect ou pour avoir diffusé des données inexactes. Le pourcentage de réduction est un paramètre protocolaire actuellement fixé à 2,5 % du staking personnel de l'Indexeur. 50 % des GRT réduit est versé au pêcheur qui a contesté les données inexactes ou le point d'intérêt incorrect. Les 50 % restants sont brûlés. -- **Récompenses d'indexation** : Les récompenses que les Indexeurs reçoivent pour l'indexation des subgraphs. Les récompenses d'indexation sont distribuées en GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Récompenses de délégation** : Les récompenses que les Déléguateurs reçoivent pour avoir délégué des GRT aux Indexeurs. Les récompenses de délégation sont distribuées en GRT. -- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. +- **GRT** : Le jeton d'utilité du travail de The Graph. Le GRT fournit des incitations économiques aux participants du réseau pour leur contribution au réseau. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. -- **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. +- **The Graph Client** : Une bibliothèque pour construire des dapps basées sur GraphQL de manière décentralisée. -- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. +- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. -- **Graph CLI**: A command line interface tool for building and deploying to The Graph. +- **Graph CLI** : Un outil d'interface de ligne de commande pour construire et déployer sur The Graph. 
-- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. +- **Cooldown Period** : Le temps restant avant qu'un indexeur qui a modifié ses paramètres de délégation puisse le faire à nouveau. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 88197190fe86960dc8f37adf0594d750266aa34b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:46 -0500 Subject: [PATCH 0474/1789] New translations glossary.mdx (Spanish) --- website/src/pages/es/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/es/resources/glossary.mdx b/website/src/pages/es/resources/glossary.mdx index a3614062a63a..dfbe07decedf 100644 --- a/website/src/pages/es/resources/glossary.mdx +++ b/website/src/pages/es/resources/glossary.mdx @@ -4,51 +4,51 @@ title: Glosario - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. -- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. 
Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. -- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. 
+- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. 
- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -56,28 +56,28 @@ title: Glosario - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. 
+- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. - **Graph CLI**: A command line interface tool for building and deploying to The Graph. - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From ed9a7347e5ec2d3787e4bf33be0c9b7e40dad8d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:47 -0500 Subject: [PATCH 0475/1789] New translations glossary.mdx (Arabic) --- website/src/pages/ar/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/ar/resources/glossary.mdx b/website/src/pages/ar/resources/glossary.mdx index f922950390a6..d456a94f63ab 100644 --- a/website/src/pages/ar/resources/glossary.mdx +++ b/website/src/pages/ar/resources/glossary.mdx @@ -4,51 +4,51 @@ title: قائمة المصطلحات - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. -- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. 
Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. -- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. 
The GRT used to pay the fee is burned. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. 
- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -56,28 +56,28 @@ title: قائمة المصطلحات - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. 
+- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. - **Graph CLI**: A command line interface tool for building and deploying to The Graph. - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 19876a21716b99e3468476b7fa1c612f3bf755f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:48 -0500 Subject: [PATCH 0476/1789] New translations glossary.mdx (Czech) --- website/src/pages/cs/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/cs/resources/glossary.mdx b/website/src/pages/cs/resources/glossary.mdx index 70161f581585..49fd1f60c539 100644 --- a/website/src/pages/cs/resources/glossary.mdx +++ b/website/src/pages/cs/resources/glossary.mdx @@ -4,51 +4,51 @@ title: Glosář - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. -- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. 
Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. -- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. 
The GRT used to pay the fee is burned. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. 
- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -56,28 +56,28 @@ title: Glosář - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. 
+- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. - **Graph CLI**: A command line interface tool for building and deploying to The Graph. - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 207688c78f6035d7565ba5d6a2575fe5e3f9f5e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:49 -0500 Subject: [PATCH 0477/1789] New translations glossary.mdx (German) --- website/src/pages/de/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/de/resources/glossary.mdx b/website/src/pages/de/resources/glossary.mdx index ffcd4bca2eed..4c5ad55cd0d3 100644 --- a/website/src/pages/de/resources/glossary.mdx +++ b/website/src/pages/de/resources/glossary.mdx @@ -4,51 +4,51 @@ title: Glossary - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. -- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. 
Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. -- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. 
The GRT used to pay the fee is burned. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. 
- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -56,28 +56,28 @@ title: Glossary - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. 
+- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. - **Graph CLI**: A command line interface tool for building and deploying to The Graph. - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From fa3c455e635622d34702f3b3722162119f42fa43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:50 -0500 Subject: [PATCH 0478/1789] New translations glossary.mdx (Italian) --- website/src/pages/it/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/it/resources/glossary.mdx b/website/src/pages/it/resources/glossary.mdx index ffcd4bca2eed..4c5ad55cd0d3 100644 --- a/website/src/pages/it/resources/glossary.mdx +++ b/website/src/pages/it/resources/glossary.mdx @@ -4,51 +4,51 @@ title: Glossary - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. -- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. 
Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. -- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. 
The GRT used to pay the fee is burned. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. 
- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -56,28 +56,28 @@ title: Glossary - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. 
+- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. - **Graph CLI**: A command line interface tool for building and deploying to The Graph. - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 3045795d12870dd1045d0f29f51affa9a3f9600c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:51 -0500 Subject: [PATCH 0479/1789] New translations glossary.mdx (Japanese) --- website/src/pages/ja/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/ja/resources/glossary.mdx b/website/src/pages/ja/resources/glossary.mdx index c71697a009cf..6a602dd4c2d2 100644 --- a/website/src/pages/ja/resources/glossary.mdx +++ b/website/src/pages/ja/resources/glossary.mdx @@ -4,51 +4,51 @@ title: 用語集 - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. -- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. 
Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. -- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. 
The GRT used to pay the fee is burned. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. 
- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -56,28 +56,28 @@ title: 用語集 - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. +- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. 
- **Graph CLI**: A command line interface tool for building and deploying to The Graph. - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 56ab846270a43dea97f38db8da89db180e149114 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:52 -0500 Subject: [PATCH 0480/1789] New translations glossary.mdx (Korean) --- website/src/pages/ko/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/ko/resources/glossary.mdx b/website/src/pages/ko/resources/glossary.mdx index ffcd4bca2eed..4c5ad55cd0d3 100644 --- a/website/src/pages/ko/resources/glossary.mdx +++ b/website/src/pages/ko/resources/glossary.mdx @@ -4,51 +4,51 @@ title: Glossary - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. -- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. 
+- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. -- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. 
The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. 
When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -56,28 +56,28 @@ title: Glossary - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. +- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. - **Graph CLI**: A command line interface tool for building and deploying to The Graph. 
- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 01953fa96b040cdca1add6bd74c71692f2b1e8e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:53 -0500 Subject: [PATCH 0481/1789] New translations glossary.mdx (Dutch) --- website/src/pages/nl/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/nl/resources/glossary.mdx b/website/src/pages/nl/resources/glossary.mdx index ffcd4bca2eed..4c5ad55cd0d3 100644 --- a/website/src/pages/nl/resources/glossary.mdx +++ b/website/src/pages/nl/resources/glossary.mdx @@ -4,51 +4,51 @@ title: Glossary - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. -- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. 
+- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. -- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. 
The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. 
When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -56,28 +56,28 @@ title: Glossary - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. +- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. - **Graph CLI**: A command line interface tool for building and deploying to The Graph. 
- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From e167af8308dbcf636e1ca4ef9ab075aaafd96be4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:54 -0500 Subject: [PATCH 0482/1789] New translations glossary.mdx (Polish) --- website/src/pages/pl/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/pl/resources/glossary.mdx b/website/src/pages/pl/resources/glossary.mdx index ffcd4bca2eed..4c5ad55cd0d3 100644 --- a/website/src/pages/pl/resources/glossary.mdx +++ b/website/src/pages/pl/resources/glossary.mdx @@ -4,51 +4,51 @@ title: Glossary - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. -- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. 
+- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. -- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. 
The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. 
When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -56,28 +56,28 @@ title: Glossary - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. +- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. - **Graph CLI**: A command line interface tool for building and deploying to The Graph. 
- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 58a405870ae52c2fbf14b2be149184a724936505 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:55 -0500 Subject: [PATCH 0483/1789] New translations glossary.mdx (Portuguese) --- website/src/pages/pt/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/pt/resources/glossary.mdx b/website/src/pages/pt/resources/glossary.mdx index 4660c4d00ecf..d075e63e2c25 100644 --- a/website/src/pages/pt/resources/glossary.mdx +++ b/website/src/pages/pt/resources/glossary.mdx @@ -4,51 +4,51 @@ title: Glossário - **The Graph:** Um protocolo descentralizado para indexação e query de dados. -- **Query:** Uma solicitação de dados. No The Graph, um query é uma solicitação por dados de um subgraph que será respondida por um Indexador. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL:** Uma linguagem de queries para APIs e um runtime (programa de execução) para realizar esses queries com os dados existentes. O The Graph usa a GraphQL para fazer queries de subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: Um URL que pode ser usado para fazer queries. O ponto final de execução para o Subgraph Studio é `https://api.studio.thegraph.com/query///`, e o do Graph Explorer é `https://gateway.thegraph.com/api//subgraphs/id/`. O ponto final do Graph Explorer é usado para fazer queries de subgraphs na rede descentralizada do The Graph. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. -- **Subgraph:** Uma API aberta que extrai, processa, e guarda dados de uma blockchain para facilitar queries via a GraphQL. Os programadores podem construir, lançar, e editar subgraphs na The Graph Network. Indexado, o subgraph está sujeito a queries por quem quiser solicitar. 
+- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexador**: Um participante da rede que executa nodes de indexação para indexar dados de blockchains e servir queries da GraphQL. - **Fluxos de Receita de Indexadores:** Os Indexadores são recompensados em GRT com dois componentes: Rebates de taxa de query e recompensas de indexação. - 1. **Rebates de Taxa de Query**: Pagamentos de consumidores de subgraphs por servir queries na rede. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Recompensas de Indexação**: São recebidas por Indexadores por indexar subgraphs, e geradas via a emissão anual de 3% de GRT. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - \*\*Auto-Stake (Stake Próprio) do Indexador: A quantia de GRT que os Indexadores usam para participar na rede descentralizada. A quantia mínima é 100.000 GRT, e não há limite máximo. - **Capacidade de Delegação**: A quantia máxima de GRT que um Indexador pode aceitar dos Delegantes. Os Indexadores só podem aceitar até 16 vezes o seu Auto-Stake, e mais delegações resultam em recompensas diluídas. Por exemplo: se um Indexador tem um Auto-Stake de 1 milhão de GRT, a capacidade de delegação é 16 milhões. Porém, os Indexadores só podem aumentar a sua Capacidade de Delegação se aumentarem também o seu Auto-Stake. -- **Indexador de Atualizações**: Um Indexador de reserva para queries não servidos por outros Indexadores na rede. Este Indexador não compete com outros Indexadores. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegante:** Um participante da rede que possui GRT e delega uma quantia para Indexadores, permitindo que esses aumentem o seu stake em subgraphs. Em retorno, os Delegantes recebem uma porção das Recompensas de Indexação recebidas pelos Indexadores por processar subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Taxa de Delegação**: Uma taxa de 0,5% paga por Delegantes quando delegam GRT a Indexadores. O GRT usado para pagar a taxa é queimado. -- **Curador:** Um participante da rede que identifica subgraphs de qualidade e sinaliza GRT para eles em troca de ações de curadoria. Quando os Indexadores resgatam as taxas de query em um subgraph, 10% é distribuído para os Curadores desse subgraph. Há uma correlação positiva entre a quantia de GRT sinalizada e o número de Indexadores a indexar um subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- \*\*Taxa de Curadoria: Uma taxa de 1% paga pelos Curadores quando sinalizam GRT em subgraphs. 
O GRT usado para pagar a taxa é queimado. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. -- Consumidor de Dados: Qualquer aplicativo ou utilizador que faz queries para um subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- \*\*Programador de Subgraph: Um programador que constrói e lança um subgraph à rede descentralizada do The Graph. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Manifest de Subgraph:** Um arquivo YAML que descreve o schema, fontes de dados, e outros metadados de um subgraph. [Veja um exemplo](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml). +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch:** Uma unidade de tempo na rede. Um epoch atualmente dura 6.646 blocos, ou cerca de um dia. -- \*\*Alocação: Um Indexador pode alocar o seu stake total em GRT (incluindo o stake dos Delegantes) em subgraphs editados na rede descentralizada do The Graph. As alocações podem ter estados diferentes: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Ativa:** Uma alocação é considerada ativa quando é criada on-chain. Isto se chama abrir uma alocação, e indica à rede que o Indexador está a indexar e servir consultas ativamente para um subgraph particular. Alocações ativas acumulam recompensas de indexação proporcionais ao sinal no subgraph, e à quantidade de GRT alocada. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Fechada**: Um Indexador pode resgatar as recompensas acumuladas em um subgraph selecionado ao enviar uma Prova de Indexação (POI) recente e válida. Isto se chama "fechar uma alocação". Uma alocação deve ter ficado aberta por, no mínimo, um epoch antes que possa ser fechada. O período máximo de alocação é de 28 epochs; se um indexador deixar uma alocação aberta por mais que isso, ela se torna uma alocação obsoleta. Quando uma alocação está **Fechada**, um Pescador ainda pode abrir uma disputa contra um Indexador por servir dados falsos. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: um dApp (aplicativo descentralizado) poderoso para a construção, lançamento e edição de subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. 
- **Pescadores**: Um papel na Graph Network cumprido por participantes que monitoram a precisão e integridade dos dados servidos pelos Indexadores. Quando um Pescador identifica uma resposta de query ou uma POI que acreditam ser incorreta, ele pode iniciar uma disputa contra o Indexador. Se a disputa der um veredito a favor do Pescador, o Indexador é cortado, ou seja, perderá 2.5% do seu auto-stake de GRT. Desta quantidade, 50% é dado ao Pescador como recompensa pela sua vigilância, e os 50% restantes são retirados da circulação (queimados). Este mecanismo é desenhado para encorajar Pescadores a ajudar a manter a confiança na rede ao garantir que Indexadores sejam responsabilizados pelos dados que providenciam. @@ -56,28 +56,28 @@ title: Glossário - Corte: Os Indexadores podem tomar cortes no seu self-stake de GRT por fornecer uma prova de indexação (POI) incorreta ou servir dados imprecisos. A percentagem de corte é um parâmetro do protocolo, atualmente configurado em 2,5% do auto-stake de um Indexador. 50% do GRT cortado vai ao Pescador que disputou os dados ou POI incorretos. Os outros 50% são queimados. -- **Recompensas de Indexação**: As recompensas que os Indexadores recebem por indexar subgraphs, distribuídas em GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Recompensas de Delegação**: As recompensas que os Delegantes recebem por delegar GRT a Indexadores, distribuídas em GRT. - **GRT**: O token de utilidade do The Graph, que oferece incentivos económicos a participantes da rede por contribuir. -- **POI (Prova de Indexação)**: Quando um Indexador fecha a sua alocação e quer resgatar as suas recompensas de indexação acumuladas em um certo subgraph, ele deve apresentar uma Prova de Indexação (POI) válida e recente. Os Pescadores podem disputar a POI providenciada por um Indexador; disputas resolvidas a favor do Pescador causam um corte para o Indexador. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: O componente que indexa subgraphs e disponibiliza os dados resultantes abertos a queries através de uma API GraphQL. Assim, ele é essencial ao stack de indexadores, e operações corretas de um Graph Node são cruciais para executar um indexador com êxito. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Agente de Indexador**: Parte do stack do indexador. Ele facilita as interações do Indexer on-chain, inclusive registos na rede, gestão de lançamentos de Subgraph ao(s) seu(s) Graph Node(s), e gestão de alocações. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: Uma biblioteca para construir dApps baseados em GraphQL de maneira descentralizada. -- **Graph Explorer**: Um dApp desenhado para que participantes da rede explorem subgraphs e interajam com o protocolo. 
+- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. - **Graph CLI**: Uma ferramenta de interface de comando de linha para construções e lançamentos no The Graph. - **Período de Recarga**: O tempo restante até que um Indexador que mudou os seus parâmetros de delegação possa fazê-lo novamente. -- Ferramentas de Transferência para L2: Contratos inteligentes e interfaces que permitem que os participantes na rede transfiram ativos relacionados à rede da mainnet da Ethereum ao Arbitrum One. Os participantes podem transferir GRT delegado, subgraphs, ações de curadoria, e o Auto-Stake do Indexador. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Atualização de um subgraph**: O processo de lançar uma nova versão de subgraph com atualizações ao manifest, schema e mapeamentos do subgraph. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migração**: O processo de movimentar ações de curadoria da versão antiga de um subgraph à versão nova do mesmo (por ex., quando a v.0.0.1 é atualizada à v.0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 6b187a76a518975851bd9f568013d3c917ef33b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:56 -0500 Subject: [PATCH 0484/1789] New translations glossary.mdx (Russian) --- website/src/pages/ru/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/ru/resources/glossary.mdx b/website/src/pages/ru/resources/glossary.mdx index ffcd4bca2eed..4c5ad55cd0d3 100644 --- a/website/src/pages/ru/resources/glossary.mdx +++ b/website/src/pages/ru/resources/glossary.mdx @@ -4,51 +4,51 @@ title: Glossary - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. 
-- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. -- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. 
There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. 
-- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -56,28 +56,28 @@ title: Glossary - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. 
-- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. +- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. - **Graph CLI**: A command line interface tool for building and deploying to The Graph. - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 47b21a251d52ce7bce27a171f34535d5c0f49923 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:57 -0500 Subject: [PATCH 0485/1789] New translations glossary.mdx (Swedish) --- website/src/pages/sv/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/sv/resources/glossary.mdx b/website/src/pages/sv/resources/glossary.mdx index dd930819456b..72ab2ba9333a 100644 --- a/website/src/pages/sv/resources/glossary.mdx +++ b/website/src/pages/sv/resources/glossary.mdx @@ -4,51 +4,51 @@ title: Ordlista - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. 
-- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. -- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. 
There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. 
-- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -56,28 +56,28 @@ title: Ordlista - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. 
-- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. +- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. - **Graph CLI**: A command line interface tool for building and deploying to The Graph. - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 6f11057ad51b94c30e6bec0f7a5f5778e252337c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:58 -0500 Subject: [PATCH 0486/1789] New translations glossary.mdx (Turkish) --- website/src/pages/tr/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/tr/resources/glossary.mdx b/website/src/pages/tr/resources/glossary.mdx index ffcd4bca2eed..4c5ad55cd0d3 100644 --- a/website/src/pages/tr/resources/glossary.mdx +++ b/website/src/pages/tr/resources/glossary.mdx @@ -4,51 +4,51 @@ title: Glossary - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. 
-- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. -- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. 
There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. 
-- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -56,28 +56,28 @@ title: Glossary - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. 
-- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. +- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. - **Graph CLI**: A command line interface tool for building and deploying to The Graph. - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 97a3f88447807bfff97232c6936f58309fdd3d40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:16:59 -0500 Subject: [PATCH 0487/1789] New translations glossary.mdx (Ukrainian) --- website/src/pages/uk/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/uk/resources/glossary.mdx b/website/src/pages/uk/resources/glossary.mdx index 1338f2ba16ba..ef7f1d9c23b9 100644 --- a/website/src/pages/uk/resources/glossary.mdx +++ b/website/src/pages/uk/resources/glossary.mdx @@ -4,51 +4,51 @@ title: Глосарій - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. 
-- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. -- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. 
There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. 
-- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -56,28 +56,28 @@ title: Глосарій - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. 
-- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. +- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. - **Graph CLI**: A command line interface tool for building and deploying to The Graph. - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 3b42f9a3f968e20f7e6acef935788f9a4022b336 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:00 -0500 Subject: [PATCH 0488/1789] New translations glossary.mdx (Chinese Simplified) --- website/src/pages/zh/resources/glossary.mdx | 68 ++++++++++----------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/website/src/pages/zh/resources/glossary.mdx b/website/src/pages/zh/resources/glossary.mdx index 98e473e0a8ae..a5eb760383a7 100644 --- a/website/src/pages/zh/resources/glossary.mdx +++ b/website/src/pages/zh/resources/glossary.mdx @@ -2,82 +2,82 @@ title: 术语汇编 --- -- **The Graph**: A decentralized protocol for indexing and querying data. +- **The Graph**: 用于索引和查询数据的去中心化协议。 -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. 
-- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. -- **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. +- **Indexers**:网络参与者运行索引节点,从区块链索引数据并提供 GraphQL 查询。 -- **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. +- **Indexer Revenue Streams**:索引人在 GRT 中的获得包括两个组成部分: 查询费用回扣和索引奖励。 - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. -- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. 
When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. -- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. +- **Epoch**: 网络中的时间单位。一个时期目前为6,646个区块或大约1天。 -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. 
When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. +- **Arbitrators**: 仲裁员是通过治理设置的网络参与者。仲裁员的作用是决定索引和查询争议的结果。他们的目标是最大限度地提高The Graph网络的效用和可靠性。 - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. -- **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. +- **Delegation Rewards**: 委托人将 GRT 委托给索引人所获得的奖励。委托奖励以 GRT 的形式分配。 -- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. +- **GRT**: The Graph的工作效用代币。 GRT 为网络参与者提供经济激励,鼓励他们为网络做出贡献。 -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. 
-- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. -- **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. +- **The Graph Client**: 用于以去中心化方式构建基于 GraphQL 的 dapps 的库。 -- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. +- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. -- **Graph CLI**: A command line interface tool for building and deploying to The Graph. +- **Graph CLI**: 用于构建和部署到The Graph 的命令行界面工具。 - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: 策展份额从子图的旧版本移动到子图的新版本的过程(例如,从 v0.0.1 更新到 v0.0.2)。 +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 7e5948dbdb353a662c99de8e8eb1b309383578ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:01 -0500 Subject: [PATCH 0489/1789] New translations glossary.mdx (Urdu (Pakistan)) --- website/src/pages/ur/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/ur/resources/glossary.mdx b/website/src/pages/ur/resources/glossary.mdx index bece9e2db4ea..8b3d3ba9814c 100644 --- a/website/src/pages/ur/resources/glossary.mdx +++ b/website/src/pages/ur/resources/glossary.mdx @@ -4,51 +4,51 @@ title: لغت - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. 
The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. -- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. 
-- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. 
**Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -56,28 +56,28 @@ title: لغت - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. 
It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. +- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. - **Graph CLI**: A command line interface tool for building and deploying to The Graph. - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 9ce4dc179eac25da2aada437057282e75e9bc7c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:02 -0500 Subject: [PATCH 0490/1789] New translations glossary.mdx (Vietnamese) --- website/src/pages/vi/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/vi/resources/glossary.mdx b/website/src/pages/vi/resources/glossary.mdx index ffcd4bca2eed..4c5ad55cd0d3 100644 --- a/website/src/pages/vi/resources/glossary.mdx +++ b/website/src/pages/vi/resources/glossary.mdx @@ -4,51 +4,51 @@ title: Glossary - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. 
The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. -- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. 
-- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. 
**Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -56,28 +56,28 @@ title: Glossary - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. 
It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. +- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. - **Graph CLI**: A command line interface tool for building and deploying to The Graph. - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From f78a69e3ff5ca5915c9a61b4d2cef683d0522135 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:03 -0500 Subject: [PATCH 0491/1789] New translations glossary.mdx (Marathi) --- website/src/pages/mr/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/mr/resources/glossary.mdx b/website/src/pages/mr/resources/glossary.mdx index ffcd4bca2eed..4c5ad55cd0d3 100644 --- a/website/src/pages/mr/resources/glossary.mdx +++ b/website/src/pages/mr/resources/glossary.mdx @@ -4,51 +4,51 @@ title: Glossary - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. 
The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. -- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. 
-- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. 
**Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -56,28 +56,28 @@ title: Glossary - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. 
It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. +- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. - **Graph CLI**: A command line interface tool for building and deploying to The Graph. - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From aca41eb274f0b9fa757958621e69d356cc96f5d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:04 -0500 Subject: [PATCH 0492/1789] New translations glossary.mdx (Hindi) --- website/src/pages/hi/resources/glossary.mdx | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/hi/resources/glossary.mdx b/website/src/pages/hi/resources/glossary.mdx index d7c1fd85df2b..0ab16b6c8474 100644 --- a/website/src/pages/hi/resources/glossary.mdx +++ b/website/src/pages/hi/resources/glossary.mdx @@ -4,51 +4,51 @@ title: शब्दकोष - **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. 
The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. -- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. - **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. 
-- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. -- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Data Consumer**: Any application or user that queries a Subgraph. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. - **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. 
**Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -56,28 +56,28 @@ title: शब्दकोष - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. - **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. 
It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. +- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. - **Graph CLI**: A command line interface tool for building and deploying to The Graph. - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. -- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 1c38dccadb95d833aa2858b8fa8a5b99d36074b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:06 -0500 Subject: [PATCH 0493/1789] New translations glossary.mdx (Swahili) --- website/src/pages/sw/resources/glossary.mdx | 83 +++++++++++++++++++++ 1 file changed, 83 insertions(+) create mode 100644 website/src/pages/sw/resources/glossary.mdx diff --git a/website/src/pages/sw/resources/glossary.mdx b/website/src/pages/sw/resources/glossary.mdx new file mode 100644 index 000000000000..4c5ad55cd0d3 --- /dev/null +++ b/website/src/pages/sw/resources/glossary.mdx @@ -0,0 +1,83 @@ +--- +title: Glossary +--- + +- **The Graph**: A decentralized protocol for indexing and querying data. + +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a Subgraph that will be answered by an Indexer. + +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query Subgraphs. + +- **Endpoint**: A URL that can be used to query a Subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query Subgraphs on The Graph's decentralized network. + +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. 
Developers can build, deploy, and publish Subgraphs to The Graph Network. Once it is indexed, the Subgraph can be queried by anyone. + +- **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. + +- **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. + + 1. **Query Fee Rebates**: Payments from Subgraph consumers for serving queries on the network. + + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + +- **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. + +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + +- **Upgrade Indexer**: An Indexer designed to act as a fallback for Subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. + +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in Subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing Subgraphs. + +- **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. + +- **Curator**: Network participants that identify high-quality Subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a Subgraph, 10% is distributed to the Curators of that Subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a Subgraph. + +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on Subgraphs. The GRT used to pay the fee is burned. + +- **Data Consumer**: Any application or user that queries a Subgraph. + +- **Subgraph Developer**: A developer who builds and deploys a Subgraph to The Graph's decentralized network. + +- **Subgraph Manifest**: A YAML file that describes the Subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. + +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. + +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards Subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: + + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular Subgraph. Active allocations accrue indexing rewards proportional to the signal on the Subgraph, and the amount of GRT allocated. + + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given Subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. 
An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing Subgraphs. + +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. + +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. + +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. + +- **Indexing Rewards**: The rewards that Indexers receive for indexing Subgraphs. Indexing rewards are distributed in GRT. + +- **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. + +- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. + +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given Subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. + +- **Graph Node**: Graph Node is the component that indexes Subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. + +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing Subgraph deployments to its Graph Node(s), and managing allocations. + +- **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. + +- **Graph Explorer**: A dapp designed for network participants to explore Subgraphs and interact with the protocol. + +- **Graph CLI**: A command line interface tool for building and deploying to The Graph. + +- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. 
+ +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, Subgraphs, curation shares, and Indexer's self-stake. + +- **Updating a Subgraph**: The process of releasing a new Subgraph version with updates to the Subgraph's manifest, schema, or mappings. + +- **Migrating**: The process of curation shares moving from an old version of a Subgraph to a new version of a Subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 9b4dbd00355446f96f8add629f6e7779ff15be03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:07 -0500 Subject: [PATCH 0494/1789] New translations curating.mdx (Romanian) --- .../src/pages/ro/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/ro/resources/roles/curating.mdx b/website/src/pages/ro/resources/roles/curating.mdx index 1cc05bb7b62f..a228ebfb3267 100644 --- a/website/src/pages/ro/resources/roles/curating.mdx +++ b/website/src/pages/ro/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: Curating --- -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## What Does Signaling Mean for The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. 
Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). 
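
As a rough illustration of the curation fees described above — a sketch under the stated assumptions, not part of any Graph SDK or contract ABI — the helpers below apply the 1% curation tax charged (and burned) when GRT is signaled, and the additional 0.5% tax charged each time auto-migrated curation shares move to a new Subgraph version. The effect of share pricing on the GRT value of minted GCS is deliberately ignored.

```typescript
// Illustrative sketch only — hypothetical helpers, not a Graph API.
const CURATION_TAX = 0.01;      // 1% on signaling, burned
const AUTO_MIGRATE_TAX = 0.005; // 0.5% per auto-migration

function signalAfterCurationTax(grtDeposited: number): number {
  return grtDeposited * (1 - CURATION_TAX);
}

function signalAfterAutoMigrations(signaledGRT: number, migrations: number): number {
  return signaledGRT * (1 - AUTO_MIGRATE_TAX) ** migrations;
}

// Example: depositing 1,000 GRT leaves 990 GRT of signal; after two version
// auto-migrations roughly 980.1 GRT of signal remains.
const signaled = signalAfterCurationTax(1_000);
console.log(signaled, signalAfterAutoMigrations(signaled, 2));
```

This also illustrates why frequent version publishing is discouraged in the text above: each auto-migration compounds another 0.5% tax on all auto-migrated shares.
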
-![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## How to Signal -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -A curator can choose to signal on a specific subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that subgraph. Both are valid strategies and come with their own pros and cons. +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. Having your signal automatically migrate to the newest production build can be valuable to ensure you keep accruing query fees. Every time you curate, a 1% curation tax is incurred. You will also pay a 0.5% curation tax on every migration. Subgraph developers are discouraged from frequently publishing new versions - they have to pay a 0.5% curation tax on all auto-migrated curation shares. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Withdrawing your GRT @@ -40,39 +40,39 @@ Curators have the option to withdraw their signaled GRT at any time. Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. 
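
The "portion of the query fees" mentioned just above is quantified in the curation FAQ further down: 10% of a Subgraph's query fees go to its Curators, pro-rata to the curation shares they hold, with that percentage subject to governance. The sketch below only illustrates that arithmetic; the function name and inputs are hypothetical, not a protocol API.

```typescript
// Illustrative sketch only — applies the 10% pro-rata split described in the
// curation FAQ; the percentage is a governance-controlled parameter.
const CURATOR_QUERY_FEE_CUT = 0.1;

function curatorFeeShare(
  subgraphQueryFeesGRT: number,
  curatorShares: number,
  totalCurationShares: number
): number {
  const curatorPool = subgraphQueryFeesGRT * CURATOR_QUERY_FEE_CUT;
  return curatorPool * (curatorShares / totalCurationShares);
}

// Example: with 5,000 GRT of query fees and a Curator holding 200 of 1,000
// curation shares, that Curator's share is 5,000 * 0.10 * 0.20 = 100 GRT.
console.log(curatorFeeShare(5_000, 200, 1_000));
```
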
+However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## Risks 1. The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. A subgraph can fail due to a bug. A failed subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. - - If you are subscribed to the newest version of a subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## Curation FAQs ### 1. What % of query fees do Curators earn? -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. How do I decide which subgraphs are high quality to signal on? +### 2. How do I decide which Subgraphs are high quality to signal on? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. 
A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. What’s the cost of updating a subgraph? +### 3. What’s the cost of updating a Subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. How often can I update my subgraph? +### 4. How often can I update my Subgraph? -It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. Can I sell my curation shares? From 5e52a3c09410133a73d86039b3bdfa32b630a757 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:08 -0500 Subject: [PATCH 0495/1789] New translations curating.mdx (French) --- .../src/pages/fr/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/fr/resources/roles/curating.mdx b/website/src/pages/fr/resources/roles/curating.mdx index 909aa9f0e848..931afdc98101 100644 --- a/website/src/pages/fr/resources/roles/curating.mdx +++ b/website/src/pages/fr/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: Curation --- -Les Curateurs jouent un rôle essentiel dans l'économie décentralisée de The Graph. 
Ils utilisent leur connaissance de l'écosystème web3 pour évaluer et signaler les subgraphs qui devraient être indexés par The Graph Network. à travers Graph Explorer, les Curateurs consultent les données du réseau pour prendre des décisions de signalisation. En retour, The Graph Network récompense les Curateurs qui signalent des subgraphs de bonne qualité en leur reversant une partie des frais de recherche générés par ces subgraphs. La quantité de GRT signalée est l'une des principales considérations des Indexeurs lorsqu'ils déterminent les subgraphs à indexer. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## Que signifie "le signalement" pour The Graph Network? -Avant que les consommateurs ne puissent interroger un subgraphs, celui-ci doit être indexé. C'est ici que la curation entre en jeu. Afin que les Indexeurs puissent gagner des frais de requête substantiels sur des subgraphs de qualité, ils doivent savoir quels subgraphs indexer. Lorsque les Curateurs signalent un subgraphs , ils indiquent aux Indexeurs qu'un subgraphs est demandé et de qualité suffisante pour être indexé. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Les Curateurs rendent le réseau The Graph efficace et le [signalement](#how-to-signal) est le processus que les Curateurs utilisent pour informer les Indexeurs qu'un subgraph est bon à indexer. Les Indexeurs peuvent se fier au signal d’un Curateur car, en signalant, les Curateurs mintent une part de curation (curation share) pour le subgraph, leur donnant droit à une partie des futurs frais de requête générés par ce subgraph. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Les signaux des Curateurs sont représentés par des jetons ERC20 appelés Graph Curation Shares (GCS). Ceux qui veulent gagner plus de frais de requête doivent signaler leurs GRT aux subgraphs qui, selon eux, généreront un flux important de frais pour le réseau. Les Curateurs ne peuvent pas être réduits pour mauvais comportement, mais il y a une taxe de dépôt sur les Curateurs pour dissuader les mauvaises décisions pouvant nuire à l'intégrité du réseau. Les Curateurs gagneront également moins de frais de requête s'ils sélectionnent un subgraph de mauvaise qualité car il y aura moins de requêtes à traiter ou moins d'Indexeurs pour les traiter. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). 
Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -L’[Indexer Sunrise Upgrade](/archived/sunrise/#what-is-the-upgrade-indexer) assure l'indexation de tous les subgraphs, toutefois, signaler des GRT sur un subgraph spécifique attirera davantage d’Indexeurs vers ce dernier. Cette incitation supplémentaire a pour but d’améliorer la qualité de service pour les requêtes en réduisant la latence et en améliorant la disponibilité du réseau. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -Lors du signalement, les Curateurs peuvent décider de signaler une version spécifique du subgraph ou de signaler en utilisant l'auto-migration. S'ils signalent en utilisant l'auto-migration, les parts d'un Curateur seront toujours mises à jour vers la dernière version publiée par le développeur. S'ils décident de signaler une version spécifique, les parts resteront toujours sur cette version spécifique. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Si vous avez besoin d’aide pour la curation afin d’améliorer la qualité de service, envoyez une demande à l’équipe Edge & Node à l’adresse support@thegraph.zendesk.com en précisant les subgraphs pour lesquels vous avez besoin d’assistance. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Les Indexeurs peuvent trouver des subgraphs à indexer en fonction des signaux de curation qu'ils voient dans Graph Explorer (capture d'écran ci-dessous). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Subgraphs de l'Explorer](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## Comment signaler -Dans l'onglet Curateur de Graph Explorer, les curateurs pourront signaler et retirer leur signal sur certains subgraphs en fonction des statistiques du réseau. Pour un guide pas à pas expliquant comment procéder dans Graph Explorer, [cliquez ici.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -Un curateur peut choisir de signaler une version spécifique d'un sugraph ou de faire migrer automatiquement son signal vers la version de production la plus récente de ce subgraph. 
Ces deux stratégies sont valables et comportent leurs propres avantages et inconvénients. +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Le signalement sur une version spécifique est particulièrement utile lorsqu'un subgraph est utilisé par plusieurs dapps. Une dapp pourrait avoir besoin de mettre à jour régulièrement le subgraph avec de nouvelles fonctionnalités, tandis qu’une autre dapp pourrait préférer utiliser une version plus ancienne et bien testée du subgraph. Lors de la curation initiale, une taxe standard de 1 % est prélevée. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. La migration automatique de votre signal vers la version de production la plus récente peut s'avérer utile pour vous assurer que vous continuez à accumuler des frais de requête. Chaque fois que vous effectuez une curation, une taxe de curation de 1 % est appliquée. Vous paierez également une taxe de curation de 0,5 % à chaque migration. Les développeurs de subgraphs sont découragés de publier fréquemment de nouvelles versions - ils doivent payer une taxe de curation de 0,5 % sur toutes les parts de curation migrées automatiquement. -> **Remarque**: La première adresse à signaler un subgraph donné est considérée comme le premier curateur et devra effectuer un travail bien plus coûteux en gas que les curateurs suivants, car le premier curateur doit initialiser les tokens de part de curation et transférer les tokens dans le proxy de The Graph. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Retrait de vos GRT @@ -40,39 +40,39 @@ Les Curateurs ont la possibilité de retirer leur GRT signalé à tout moment. Contrairement au processus de délégation, si vous décidez de retirer vos GRT signalés, vous n'aurez pas un délai d'attente et vous recevrez le montant total (moins la taxe de curation de 1%). -Une fois qu'un Curateur retire ses signaux, les Indexeurs peuvent choisir de continuer à indexer le subgraph, même s'il n'y a actuellement aucun GRT signalé actif. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -Cependant, il est recommandé que les Curateurs laissent leur GRT signalé en place non seulement pour recevoir une partie des frais de requête, mais aussi pour assurer la fiabilité et la disponibilité du subgraph. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## Risques 1. Le marché des requêtes est intrinsèquement jeune chez The Graph et il y a un risque que votre %APY soit inférieur à vos attentes en raison de la dynamique naissante du marché. -2. Frais de curation - lorsqu'un Curateur signale des GRT sur un subgraph, il doit s'acquitter d'une taxe de curation de 1%. Cette taxe est brûlée. -3. 
(Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. Un subgraph peut échouer à cause d'un bug. Un subgraph qui échoue n'accumule pas de frais de requête. Par conséquent, vous devrez attendre que le développeur corrige le bogue et déploie une nouvelle version. - - Si vous êtes abonné à la version la plus récente d'un subgraph, vos parts migreront automatiquement vers cette nouvelle version. Cela entraînera une taxe de curation de 0,5 %. - - Si vous avez signalé sur une version spécifique d'un subgraph et qu'elle échoue, vous devrez brûler manuellement vos parts de curation. Vous pouvez alors signaler sur la nouvelle version du subgraph, encourant ainsi une taxe de curation de 1%. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## FAQs sur la Curation ### 1. Quel pourcentage des frais de requête les Curateurs perçoivent-ils? -En signalant sur un subgraph, vous gagnerez une part de tous les frais de requête générés par le subgraph. 10% de tous les frais de requête vont aux Curateurs au prorata de leurs parts de curation. Ces 10% sont soumis à la gouvernance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. Comment décider quels sont les subgraphs de haute qualité sur lesquels on peut émettre un signal ? +### 2. How do I decide which Subgraphs are high quality to signal on? -Identifier des subgraphs de haute qualité est une tâche complexe, mais il existe de multiples approches.. En tant que Curateur, vous souhaitez trouver des subgraphs fiables qui génèrent un volume de requêtes élevé. Un subgraph fiable peut être précieux s’il est complet, précis et s’il répond aux besoins en données d’une dapp. Un subgraph mal conçu pourrait avoir besoin d'être révisé ou republié, et peut aussi finir par échouer. 
Il est crucial pour les Curateurs d'examiner l'architecture ou le code d'un subgraph afin d'évaluer sa valeur. Ainsi : +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Les Curateurs peuvent utiliser leur compréhension d'un réseau pour essayer de prédire comment un subgraph individuel peut générer un volume de requêtes plus élevé ou plus faible à l'avenir -- Les Curateurs doivent également comprendre les métriques disponibles via Graph Explorer. Des métriques telles que le volume de requêtes passées et l'identité du développeur du subgraph peuvent aider à déterminer si un subgraph mérite ou non d'être signalé. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. Quel est le coût de la mise à jour d'un subgraph ? +### 3. What’s the cost of updating a Subgraph? -La migration de vos parts de curation (curation shares) vers une nouvelle version de subgraph entraîne une taxe de curation de 1 %. Les Curateurs peuvent choisir de s'abonner à la dernière version d'un subgraph. Lorsque les parts de Curateurs sont automatiquement migrées vers une nouvelle version, les Curateurs paieront également une demi-taxe de curation, soit 0,5 %, car la mise à niveau (upgrade) des subgraphs est une action onchain qui coûte du gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. À quelle fréquence puis-je mettre à jour mon subgraph ? +### 4. How often can I update my Subgraph? -Il est conseillé de ne pas mettre à jour vos subgraphs trop fréquemment. Voir la question ci-dessus pour plus de détails. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. Puis-je vendre mes parts de curateurs ? From 89ce568b486f4c1e55bf20414cf8ad34366bcab4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:09 -0500 Subject: [PATCH 0496/1789] New translations curating.mdx (Spanish) --- .../src/pages/es/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/es/resources/roles/curating.mdx b/website/src/pages/es/resources/roles/curating.mdx index da189f62bf69..a3ec7ae0ce5e 100644 --- a/website/src/pages/es/resources/roles/curating.mdx +++ b/website/src/pages/es/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: Curación --- -Curators are critical to The Graph's decentralized economy. 
They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## What Does Signaling Mean for The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. 
Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## Cómo señalar -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -Un curador puede optar por señalar una versión especifica de un subgrafo, o puede optar por que su señal migre automáticamente a la versión de producción mas reciente de ese subgrafo. Ambas son estrategias válidas y tienen sus pros y sus contras. +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. 
Upon initial curation, a 1% standard tax is incurred. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. Hacer que tu señal migre automáticamente a la más reciente compilación de producción puede ser valioso para asegurarse de seguir acumulando tarifas de consulta. Cada vez que curas, se incurre en un impuesto de curación del 1%. También pagarás un impuesto de curación del 0,5% en cada migración. Se desaconseja a los desarrolladores de Subgrafos que publiquen con frecuencia nuevas versiones - tienen que pagar un impuesto de curación del 0,5% en todas las acciones de curación auto-migradas. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Withdrawing your GRT @@ -40,39 +40,39 @@ Curators have the option to withdraw their signaled GRT at any time. Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## Riesgos 1. El mercado de consultas es inherentemente joven en The Graph y existe el riesgo de que su APY (Rentabilidad anualizada) sea más bajo de lo esperado debido a la dinámica del mercado que recién está empezando. -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. Un subgrafo puede fallar debido a un error. Un subgrafo fallido no acumula tarifas de consulta. Como resultado, tendrás que esperar hasta que el desarrollador corrija el error e implemente una nueva versión. 
- - Si estás suscrito a la versión más reciente de un subgrafo, tus acciones se migrarán automáticamente a esa nueva versión. Esto incurrirá un impuesto de curación del 0.5%. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## Preguntas frecuentes sobre Curación ### 1. ¿Qué porcentaje de las tasas de consulta ganan los curadores? -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. ¿Cómo decido qué subgrafos son de alta calidad para señalar? +### 2. How do I decide which Subgraphs are high quality to signal on? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. 
Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. What’s the cost of updating a subgraph? +### 3. What’s the cost of updating a Subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. How often can I update my subgraph? +### 4. How often can I update my Subgraph? -It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. ¿Puedo vender mis acciones de curación? From d73476768cde68c395063848c5c89c5a774a389a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:10 -0500 Subject: [PATCH 0497/1789] New translations curating.mdx (Arabic) --- .../src/pages/ar/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/ar/resources/roles/curating.mdx b/website/src/pages/ar/resources/roles/curating.mdx index d2f355055aac..e73785e92590 100644 --- a/website/src/pages/ar/resources/roles/curating.mdx +++ b/website/src/pages/ar/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: Curating --- -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## What Does Signaling Mean for The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. 
In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. 
+When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## كيفية الإشارة -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -يمكن للمنسق الإشارة إلى إصدار معين ل subgraph ، أو يمكنه اختيار أن يتم ترحيل migrate إشاراتهم تلقائيا إلى أحدث إصدار لهذا ال subgraph. كلاهما استراتيجيات سليمة ولها إيجابيات وسلبيات. +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. Having your signal automatically migrate to the newest production build can be valuable to ensure you keep accruing query fees. Every time you curate, a 1% curation tax is incurred. You will also pay a 0.5% curation tax on every migration. Subgraph developers are discouraged from frequently publishing new versions - they have to pay a 0.5% curation tax on all auto-migrated curation shares. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. 
+> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Withdrawing your GRT @@ -40,39 +40,39 @@ Curators have the option to withdraw their signaled GRT at any time. Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## المخاطر 1. سوق الاستعلام يعتبر حديثا في The Graph وهناك خطر من أن يكون٪ APY الخاص بك أقل مما تتوقع بسبب ديناميكيات السوق الناشئة. -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. يمكن أن يفشل ال subgraph بسبب خطأ. ال subgraph الفاشل لا يمكنه إنشاء رسوم استعلام. نتيجة لذلك ، سيتعين عليك الانتظار حتى يصلح المطور الخطأ وينشر إصدارا جديدا. - - إذا كنت مشتركا في أحدث إصدار من subgraph ، فسيتم ترحيل migrate أسهمك تلقائيا إلى هذا الإصدار الجديد. هذا سيتحمل ضريبة تنسيق بنسبة 0.5٪. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. 
+ - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## الأسئلة الشائعة حول التنسيق ### 1. ما هي النسبة المئوية لرسوم الاستعلام التي يكسبها المنسقون؟ -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. كيف يمكنني تقرير ما إذا كان ال subgraph عالي الجودة لكي أقوم بالإشارة إليه؟ +### 2. How do I decide which Subgraphs are high quality to signal on? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. What’s the cost of updating a subgraph? +### 3. What’s the cost of updating a Subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. How often can I update my subgraph? +### 4. How often can I update my Subgraph? 
-It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. هل يمكنني بيع أسهم التنسيق الخاصة بي؟ From cb57bac44e280b8b4c5e5aaf6c5b087995ee360e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:11 -0500 Subject: [PATCH 0498/1789] New translations curating.mdx (Czech) --- .../src/pages/cs/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/cs/resources/roles/curating.mdx b/website/src/pages/cs/resources/roles/curating.mdx index c8b9caf18e2e..f06866a7c0ee 100644 --- a/website/src/pages/cs/resources/roles/curating.mdx +++ b/website/src/pages/cs/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: Kurátorování --- -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## What Does Signaling Mean for The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). 
Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## Jak signalizovat -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. 
For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -Kurátor si může zvolit, zda bude signalizovat na konkrétní verzi podgrafu, nebo zda se jeho signál automaticky přenese na nejnovější produkční sestavení daného podgrafu. Obě strategie jsou platné a mají své výhody i nevýhody. +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. Automatická migrace signálu na nejnovější produkční sestavení může být cenná, protože zajistí, že se poplatky za dotazy budou neustále zvyšovat. Při každém kurátorství se platí 1% kurátorský poplatek. Při každé migraci také zaplatíte 0,5% kurátorskou daň. Vývojáři podgrafu jsou odrazováni od častého publikování nových verzí - musí zaplatit 0.5% kurátorskou daň ze všech automaticky migrovaných kurátorských podílů. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Withdrawing your GRT @@ -40,39 +40,39 @@ Curators have the option to withdraw their signaled GRT at any time. Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## Rizika 1. Trh s dotazy je v Graf ze své podstaty mladý a existuje riziko, že vaše %APY může být nižší, než očekáváte, v důsledku dynamiky rodícího se trhu. -2. 
Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. Podgraf může selhat kvůli chybě. Za neúspěšný podgraf se neúčtují poplatky za dotaz. V důsledku toho budete muset počkat, až vývojář chybu opraví a nasadí novou verzi. - - Pokud jste přihlášeni k odběru nejnovější verze podgrafu, vaše sdílené položky se automaticky přemigrují na tuto novou verzi. Při tom bude účtována 0,5% kurátorská daň. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## Nejčastější dotazy ke kurátorství ### 1. Kolik % z poplatků za dotazy kurátoři vydělávají? -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. Jak se rozhodnu, které podgrafy jsou kvalitní a na kterých je třeba signalizovat? +### 2. How do I decide which Subgraphs are high quality to signal on? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. 
It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. Jaké jsou náklady na aktualizaci podgrafu? +### 3. What’s the cost of updating a Subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. Jak často mohu svůj podgraf aktualizovat? +### 4. How often can I update my Subgraph? -Doporučujeme, abyste podgrafy neaktualizovali příliš často. Další podrobnosti naleznete v otázce výše. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. Mohu prodat své kurátorské podíly? From 624b76e3b41acd66b7778c73486c38b600cc98ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:12 -0500 Subject: [PATCH 0499/1789] New translations curating.mdx (German) --- .../src/pages/de/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/de/resources/roles/curating.mdx b/website/src/pages/de/resources/roles/curating.mdx index 7d145d84ab5e..e7ee43e466fe 100644 --- a/website/src/pages/de/resources/roles/curating.mdx +++ b/website/src/pages/de/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: Kuratieren --- -Kuratoren sind entscheidend für die dezentrale Wirtschaft von The Graph. Sie nutzen ihr Wissen über das web3-Ökosystem, um die Subgraphen zu bewerten und zu signalisieren, die von The Graph Network indiziert werden sollten. Über den Graph Explorer sehen die Kuratoren die Netzwerkdaten, um Signalisierungsentscheidungen zu treffen. 
Im Gegenzug belohnt The Graph Network Kuratoren, die auf qualitativ hochwertige Subgraphen hinweisen, mit einem Anteil an den Abfragegebühren, die diese Subgraphen generieren. Die Höhe der signalisierten GRT ist eine der wichtigsten Überlegungen für Indexer bei der Entscheidung, welche Subgraphen indiziert werden sollen. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## Was bedeutet Signalisierung für The Graph Network? -Bevor Verbraucher einen Subgraphen abfragen können, muss er indiziert werden. An dieser Stelle kommt die Kuratierung ins Spiel. Damit Indexer erhebliche Abfragegebühren für hochwertige Subgraphen verdienen können, müssen sie wissen, welche Subgraphen indiziert werden sollen. Wenn Kuratoren ein Signal für einen Subgraphen geben, wissen Indexer, dass ein Subgraph gefragt und von ausreichender Qualität ist, so dass er indiziert werden sollte. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Kuratoren machen das The Graph Netzwerk effizient und [signaling](#how-to-signal) ist der Prozess, den Kuratoren verwenden, um Indexer wissen zu lassen, dass ein Subgraph gut zu indizieren ist. Indexer können dem Signal eines Kurators vertrauen, da Kuratoren nach dem Signalisieren einen Kurationsanteil für den Subgraphen prägen, der sie zu einem Teil der zukünftigen Abfragegebühren berechtigt, die der Subgraph verursacht. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Die Signale der Kuratoren werden als ERC20-Token dargestellt, die Graph Curation Shares (GCS) genannt werden. Diejenigen, die mehr Abfragegebühren verdienen wollen, sollten ihre GRT an Subgraphen signalisieren, von denen sie vorhersagen, dass sie einen starken Gebührenfluss an das Netzwerk generieren werden. Kuratoren können nicht für schlechtes Verhalten bestraft werden, aber es gibt eine Einlagensteuer für Kuratoren, um von schlechten Entscheidungen abzuschrecken, die der Integrität des Netzwerks schaden könnten. Kuratoren werden auch weniger Abfragegebühren verdienen, wenn sie einen Subgraphen von geringer Qualität kuratieren, weil es weniger Abfragen zu bearbeiten gibt oder weniger Indexer, die sie bearbeiten. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. 
Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -Der [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) stellt die Indizierung aller Subgraphen sicher und signalisiert, dass GRT auf einem bestimmten Subgraphen mehr Indexer anzieht. Dieser Anreiz für zusätzliche Indexer durch Kuration zielt darauf ab, die Servicequalität für Abfragen zu verbessern, indem die Latenzzeit verringert und die Netzwerkverfügbarkeit erhöht wird. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -Bei der Signalisierung können Kuratoren entscheiden, ob sie für eine bestimmte Version des Subgraphen signalisieren wollen oder ob sie die automatische Migration verwenden wollen. Bei der automatischen Migration werden die Freigaben eines Kurators immer auf die neueste vom Entwickler veröffentlichte Version aktualisiert. Wenn sie sich stattdessen für eine bestimmte Version entscheiden, bleiben die Freigaben immer auf dieser spezifischen Version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Wenn Sie Unterstützung bei der Kuratierung benötigen, um die Qualität des Dienstes zu verbessern, senden Sie bitte eine Anfrage an das Edge & Node-Team unter support@thegraph.zendesk.com und geben Sie die Subgraphen an, für die Sie Unterstützung benötigen. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexer können Subgraphen für die Indizierung auf der Grundlage von Kurationssignalen finden, die sie im Graph Explorer sehen (siehe Screenshot unten). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer-Subgrafen](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## Wie man signalisiert -Auf der Registerkarte Kurator im Graph Explorer können Kuratoren bestimmte Subgraphen auf der Grundlage von Netzwerkstatistiken an- und abmelden. Einen schrittweisen Überblick über die Vorgehensweise im Graph Explorer finden Sie [hier](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -Ein Kurator kann sich dafür entscheiden, ein Signal für eine bestimmte Subgraph-Version abzugeben, oder er kann sein Signal automatisch auf die neueste Produktionsversion dieses Subgraphen migrieren lassen. Beides sind gültige Strategien und haben ihre eigenen Vor- und Nachteile. 
+A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Die Signalisierung einer bestimmten Version ist besonders nützlich, wenn ein Subgraph von mehreren Dapps verwendet wird. Eine Dapp muss den Subgraph vielleicht regelmäßig mit neuen Funktionen aktualisieren. Eine andere Dapp zieht es vielleicht vor, eine ältere, gut getestete Version des Subgraphs zu verwenden. Bei der ersten Kuration fällt eine Standardsteuer von 1% an. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. Die automatische Migration Ihres Signals zum neuesten Produktions-Build kann sich als nützlich erweisen, um sicherzustellen, dass Sie weiterhin Abfragegebühren anfallen. Jedes Mal, wenn Sie kuratieren, fällt eine Kuratierungssteuer von 1 % an. Außerdem zahlen Sie bei jeder Migration eine Kuratierungssteuer von 0,5 %. Subgraph-Entwickler werden davon abgehalten, häufig neue Versionen zu veröffentlichen - sie müssen eine Kurationssteuer von 0,5 % auf alle automatisch migrierten Kurationsanteile zahlen. -> **Anmerkung**: Die erste Adresse, die einen bestimmten Subgraph signalisiert, wird als erster Kurator betrachtet und muss viel mehr Arbeit leisten als die übrigen folgenden Kuratoren, da der erste Kurator die Kurationsaktien-Token initialisiert und außerdem Token in den Graph-Proxy überträgt. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Abhebung Ihrer GRT @@ -40,39 +40,39 @@ Die Kuratoren haben jederzeit die Möglichkeit, ihre signalisierten GRT zurückz Anders als beim Delegieren müssen Sie, wenn Sie sich entscheiden, Ihr signalisiertes GRT abzuheben, keine Abkühlungsphase abwarten und erhalten den gesamten Betrag (abzüglich der 1 % Kurationssteuer). -Sobald ein Kurator sein Signal zurückzieht, können die Indexer den Subgraphen weiter indizieren, auch wenn derzeit kein aktives GRT signalisiert wird. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -Es wird jedoch empfohlen, dass Kuratoren ihr signalisiertes GRT bestehen lassen, nicht nur um einen Teil der Abfragegebühren zu erhalten, sondern auch um die Zuverlässigkeit und Betriebszeit des Subgraphen zu gewährleisten. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## Risiken 1. Der Abfragemarkt ist bei The Graph noch sehr jung, und es besteht das Risiko, dass Ihr %APY aufgrund der noch jungen Marktdynamik niedriger ist als Sie erwarten. -2. Kurationsgebühr - wenn ein Kurator GRT auf einem Subgraphen meldet, fällt eine Kurationsgebühr von 1% an. Diese Gebühr wird verbrannt. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. 
Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. Ein Subgraph kann aufgrund eines Fehlers fehlschlagen. Für einen fehlgeschlagenen Subgraph fallen keine Abfragegebühren an. Daher müssen Sie warten, bis der Entwickler den Fehler behebt und eine neue Version bereitstellt. - - Wenn Sie die neueste Version eines Subgraphen abonniert haben, werden Ihre Anteile automatisch zu dieser neuen Version migriert. Dabei fällt eine Kurationsgebühr von 0,5 % an. - - Wenn Sie für eine bestimmte Version eines Subgraphen ein Signal gegeben haben und dieses fehlschlägt, müssen Sie Ihre Kurationsanteile manuell verbrennen. Sie können dann ein Signal für die neue Subgraph-Version geben, wodurch eine Kurationssteuer von 1 % anfällt. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## FAQs zur Kuration ### 1. Wie viel Prozent der Abfragegebühren verdienen die Kuratoren? -Durch das Signalisieren auf einem Subgraphen erhalten Sie einen Anteil an allen Abfragegebühren, die der Subgraph generiert. 10 % aller Abfragegebühren gehen an die Kuratoren im Verhältnis zu ihren Kurationsanteilen. Diese 10 % unterliegen der Governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. Wie entscheide ich, welche Subgraphen qualitativ hochwertig sind, um sie zu signalisieren? +### 2. How do I decide which Subgraphs are high quality to signal on? -Die Suche nach qualitativ hochwertigen Subgraphen ist eine komplexe Aufgabe, die auf viele verschiedene Arten angegangen werden kann. Als Kurator möchten Sie nach vertrauenswürdigen Subgraphen suchen, die das Abfragevolumen erhöhen. Ein vertrauenswürdiger Subgraph kann wertvoll sein, wenn er vollständig und genau ist und die Datenanforderungen einer App unterstützt. Ein schlecht entworfener Subgraph muss möglicherweise überarbeitet oder neu veröffentlicht werden und kann auch scheitern. 
Es ist wichtig, dass die Kuratoren die Architektur oder den Code eines Subgraphen überprüfen, um zu beurteilen, ob ein Subgraph wertvoll ist. Daraus folgt: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Kuratoren können ihr Verständnis eines Netzwerks nutzen, um vorherzusagen, wie ein einzelner Subgraph in der Zukunft ein höheres oder niedrigeres Suchvolumen generieren könnte -- Kuratoren sollten auch die Metriken verstehen, die über den Graph Explorer verfügbar sind. Metriken wie das vergangene Abfragevolumen und die Person des Subgraph-Entwicklers können dabei helfen, festzustellen, ob ein Subgraph eine Meldung wert ist oder nicht. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. Wie hoch sind die Kosten für die Aktualisierung eines Subgraphen? +### 3. What’s the cost of updating a Subgraph? -Wenn Sie Ihre Kuratorenanteile auf eine neue Subgraph-Version migrieren, fällt eine Kuratorensteuer von 1 % an. Kuratoren können sich dafür entscheiden, die neueste Version eines Subgraphen zu abonnieren. Wenn Kuratorenanteile automatisch auf eine neue Version migriert werden, zahlen Kuratoren ebenfalls die Hälfte der Kurationssteuer, d. h. 0,5 %, da die Aktualisierung von Subgraphen eine Onchain-Aktion ist, die Gas kostet. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. Wie oft kann ich meinen Subgraphen aktualisieren? +### 4. How often can I update my Subgraph? -Es wird empfohlen, dass Sie Ihre Subgraphen nicht zu häufig aktualisieren. Sehen Sie die obige Frage für weitere Details. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. Kann ich meine Kurationsanteile verkaufen? From 978186c897b0bdf55f2b4cd9f8b162e4d0ec9b42 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:15 -0500 Subject: [PATCH 0500/1789] New translations curating.mdx (Italian) --- .../src/pages/it/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/it/resources/roles/curating.mdx b/website/src/pages/it/resources/roles/curating.mdx index 330a80715730..a449b5b9fcc0 100644 --- a/website/src/pages/it/resources/roles/curating.mdx +++ b/website/src/pages/it/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: Curating --- -Curators are critical to The Graph's decentralized economy. 
They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## What Does Signaling Mean for The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. 
Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## Come segnalare -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -Un curator può scegliere di segnalare su una versione specifica del subgraph, oppure può scegliere di far migrare automaticamente il proprio segnale alla versione di produzione più recente di quel subgraph. Entrambe le strategie sono valide e hanno i loro pro e contro. +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. 
Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. La migrazione automatica del segnale alla più recente versione di produzione può essere utile per garantire l'accumulo di tariffe di query. Ogni volta che si effettua una curation, si paga una tassa di curation del 1%. Si pagherà anche una tassa di curation del 0,5% per ogni migrazione. Gli sviluppatori di subgraph sono scoraggiati dal pubblicare frequentemente nuove versioni: devono pagare una tassa di curation del 0,5% su tutte le quote di curation auto-migrate. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Withdrawing your GRT @@ -40,39 +40,39 @@ Curators have the option to withdraw their signaled GRT at any time. Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## Rischi 1. Il mercato delle query è intrinsecamente giovane per The Graph e c'è il rischio che la vostra %APY possa essere inferiore a quella prevista a causa delle dinamiche di mercato nascenti. -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. Un subgraph può fallire a causa di un bug. Un subgraph fallito non matura commissioni della query. Di conseguenza, si dovrà attendere che lo sviluppatore risolva il bug e distribuisca una nuova versione. 
- - Se siete iscritti alla versione più recente di un subgraph, le vostre quote di partecipazione migreranno automaticamente a quella nuova versione. Questo comporta una tassa di curation di 0,5%. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## FAQ sulla curation ### 1. Quale % delle tariffe di query guadagnano i curator? -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. Come si fa a decidere quali subgraph sono di alta qualità da segnalare? +### 2. How do I decide which Subgraphs are high quality to signal on? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. 
Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. Qual è il costo dell'aggiornamento di un subgraph? +### 3. What’s the cost of updating a Subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. Con quale frequenza posso aggiornare il mio subgraph? +### 4. How often can I update my Subgraph? -Si suggerisce di non aggiornare i subgraph troppo frequentemente. Si veda la domanda precedente per maggiori dettagli. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. Posso vendere le mie quote di curation? From 192a8ef92ef69270c8ed2597fbd3c563f81e288b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:17 -0500 Subject: [PATCH 0501/1789] New translations curating.mdx (Japanese) --- .../src/pages/ja/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/ja/resources/roles/curating.mdx b/website/src/pages/ja/resources/roles/curating.mdx index ff0ae8aced25..56560702df5c 100644 --- a/website/src/pages/ja/resources/roles/curating.mdx +++ b/website/src/pages/ja/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: キュレーティング --- -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## What Does Signaling Mean for The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. 
In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. 
+When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## シグナルの出し方 -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -キュレーターは、特定のサブグラフのバージョンでシグナルを出すことも、そのサブグラフの最新のプロダクションビルドに自動的にシグナルを移行させることも可能ですます。 どちらも有効な戦略であり、それぞれに長所と短所があります。 +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. シグナルを最新のプロダクションビルドに自動的に移行させることは、クエリー料金の発生を確実にするために有効です。 キュレーションを行うたびに、1%のキュレーション税が発生します。 また、移行ごとに 0.5%のキュレーション税を支払うことになります。 つまり、サブグラフの開発者が、頻繁に新バージョンを公開することは推奨されません。 自動移行された全てのキュレーションシェアに対して、0.5%のキュレーション税を支払わなければならないからです。 -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Withdrawing your GRT @@ -40,39 +40,39 @@ Curators have the option to withdraw their signaled GRT at any time. 
Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## リスク 1. The Graph では、クエリ市場は本質的に歴史が浅く、初期の市場ダイナミクスのために、あなたの%APY が予想より低くなるリスクがあります。 -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. サブグラフはバグで失敗することがあります。 失敗したサブグラフは、クエリフィーが発生しません。 結果的に、開発者がバグを修正して新しいバージョンを展開するまで待たなければならなくなります。 - - サブグラフの最新バージョンに加入している場合、シェアはその新バージョンに自動移行します。 これには 0.5%のキュレーション税がかかります。 - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## キューレーション FAQ ### 1. キュレータはクエリフィーの何%を獲得できますか? -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 
10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. シグナルを出すのに適した質の高いサブグラフはどのようにして決めるのですか? +### 2. How do I decide which Subgraphs are high quality to signal on? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. サブグラフの更新にかかるコストはいくらですか? +### 3. What’s the cost of updating a Subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. サブグラフはどれくらいの頻度で更新できますか? +### 4. How often can I update my Subgraph? -サブグラフをあまり頻繁に更新しないことをお勧めします。詳細については、上記の質問を参照してください。 +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. キュレーションのシェアを売却することはできますか? 
From 5f632f310c005acc70b5345247550dfcdbd6f4d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:18 -0500 Subject: [PATCH 0502/1789] New translations curating.mdx (Korean) --- .../src/pages/ko/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/ko/resources/roles/curating.mdx b/website/src/pages/ko/resources/roles/curating.mdx index 1cc05bb7b62f..a228ebfb3267 100644 --- a/website/src/pages/ko/resources/roles/curating.mdx +++ b/website/src/pages/ko/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: Curating --- -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## What Does Signaling Mean for The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. 
Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## How to Signal -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. 
For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -A curator can choose to signal on a specific subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that subgraph. Both are valid strategies and come with their own pros and cons. +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. Having your signal automatically migrate to the newest production build can be valuable to ensure you keep accruing query fees. Every time you curate, a 1% curation tax is incurred. You will also pay a 0.5% curation tax on every migration. Subgraph developers are discouraged from frequently publishing new versions - they have to pay a 0.5% curation tax on all auto-migrated curation shares. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Withdrawing your GRT @@ -40,39 +40,39 @@ Curators have the option to withdraw their signaled GRT at any time. Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## Risks 1. The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. 
Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. A subgraph can fail due to a bug. A failed subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. - - If you are subscribed to the newest version of a subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## Curation FAQs ### 1. What % of query fees do Curators earn? -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. How do I decide which subgraphs are high quality to signal on? +### 2. How do I decide which Subgraphs are high quality to signal on? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. 
A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. What’s the cost of updating a subgraph? +### 3. What’s the cost of updating a Subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. How often can I update my subgraph? +### 4. How often can I update my Subgraph? -It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. Can I sell my curation shares? From 25cf9cd3db5c8f7dabaa638dfe80e8b7e25fcc10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:19 -0500 Subject: [PATCH 0503/1789] New translations curating.mdx (Dutch) --- .../src/pages/nl/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/nl/resources/roles/curating.mdx b/website/src/pages/nl/resources/roles/curating.mdx index 99c74778c9bd..a2f4fff13893 100644 --- a/website/src/pages/nl/resources/roles/curating.mdx +++ b/website/src/pages/nl/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: Cureren --- -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Curators are critical to The Graph's decentralized economy. 
They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## What Does Signaling Mean for The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. 
+The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## Hoe werkt het Signaleren -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -Een curator kan ervoor kiezen om een signaal af te geven voor een specifieke subgraph versie, of ze kunnen ervoor kiezen om hun signaal automatisch te laten migreren naar de nieuwste versie van de subgraph. Beide strategieën hebben voordelen en nadelen. +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. Automatische migratie van je signalering naar de nieuwste subgraphversie kan waardevol zijn om ervoor te zorgen dat je querykosten blijft ontvangen. 
Elke keer dat je signaleert, wordt een curatiebelasting van 1% in rekening gebracht. Je betaalt ook een curatiebelasting van 0,5% bij elke migratie. Subgraphontwikkelaars worden ontmoedigd om vaak nieuwe versies te publiceren - ze moeten een curatiebelasting van 0,5% betalen voor alle automatisch gemigreerde curatie-aandelen. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Withdrawing your GRT @@ -40,39 +40,39 @@ Curators have the option to withdraw their signaled GRT at any time. Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## Risico's 1. De querymarkt is nog jong bij het Graph Netwerk en er bestaat een risico dat je %APY lager kan zijn dan je verwacht door opkomende marktdynamiek. -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. Een subgraph kan stuk gaan door een bug. Een subgraph die stuk is gegenereerd geen querykosten. Als gevolg hiervan moet je wachten tot de ontwikkelaar de bug repareert en een nieuwe versie implementeert. - - Als je bent geabonneerd op de nieuwste versie van een subgraph, worden je curatieaandelen automatisch gemigreerd naar die nieuwe versie. Er is een curatiebelasting van 0,5%. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. 
Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## Veelgestelde Vragen over Curatie ### Welk percentage van de querykosten verdienen curatoren? -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### Hoe bepaal ik welke subgraphs van hoge kwaliteit zijn om op te signaleren? +### 2. How do I decide which Subgraphs are high quality to signal on? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### Wat zijn de kosten voor het updaten van een subgraph? +### 3. What’s the cost of updating a Subgraph? 
-Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### Hoe vaak kan ik mijn subgraph updaten? +### 4. How often can I update my Subgraph? -Het wordt aanbevolen om je subgraphs niet te vaak bij te werken. Zie de bovenstaande vraag voor meer details. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### Kan ik mijn curatieaandelen verkopen? From 674288100bc2b71120b2e2e4b184c56318919f41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:20 -0500 Subject: [PATCH 0504/1789] New translations curating.mdx (Polish) --- .../src/pages/pl/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/pl/resources/roles/curating.mdx b/website/src/pages/pl/resources/roles/curating.mdx index 1cc05bb7b62f..a228ebfb3267 100644 --- a/website/src/pages/pl/resources/roles/curating.mdx +++ b/website/src/pages/pl/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: Curating --- -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## What Does Signaling Mean for The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. 
-Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. 
+If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## How to Signal -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -A curator can choose to signal on a specific subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that subgraph. Both are valid strategies and come with their own pros and cons. +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. Having your signal automatically migrate to the newest production build can be valuable to ensure you keep accruing query fees. Every time you curate, a 1% curation tax is incurred. You will also pay a 0.5% curation tax on every migration. Subgraph developers are discouraged from frequently publishing new versions - they have to pay a 0.5% curation tax on all auto-migrated curation shares. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Withdrawing your GRT @@ -40,39 +40,39 @@ Curators have the option to withdraw their signaled GRT at any time. Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. 
+Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## Risks 1. The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. A subgraph can fail due to a bug. A failed subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. - - If you are subscribed to the newest version of a subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## Curation FAQs ### 1. What % of query fees do Curators earn? -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. 
How do I decide which subgraphs are high quality to signal on? +### 2. How do I decide which Subgraphs are high quality to signal on? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. What’s the cost of updating a subgraph? +### 3. What’s the cost of updating a Subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. How often can I update my subgraph? +### 4. How often can I update my Subgraph? -It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. Can I sell my curation shares? 
From 69fdbca88793463276688c71250db3f7507225aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:21 -0500 Subject: [PATCH 0505/1789] New translations curating.mdx (Portuguese) --- .../src/pages/pt/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/pt/resources/roles/curating.mdx b/website/src/pages/pt/resources/roles/curating.mdx index 582a7926b9ee..0bdc3248b7be 100644 --- a/website/src/pages/pt/resources/roles/curating.mdx +++ b/website/src/pages/pt/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: Curadorias --- -Curadores são importantes para a economia descentralizada do The Graph. Eles utilizam o seu conhecimento do ecossistema web3 para avaliar e sinalizar nos subgraphs que devem ser indexados pela Graph Network. Através do Graph Explorer, Curadores visualizam dados de rede para tomar decisões sobre sinalizações. Em troca, a Graph Network recompensa Curadores que sinalizam em subgraphs de alta qualidade com uma parte das taxas de query geradas por estes subgraphs. A quantidade de GRT sinalizada é uma das considerações mais importantes para Indexadores ao determinar quais subgraphs indexar. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## O que a Sinalização Significa para a Graph Network? -Antes que consumidores possam indexar um subgraph, ele deve ser indexado. É aqui que entra a curadoria. Para que Indexadores ganhem taxas de query substanciais em subgraphs de qualidade, eles devem saber quais subgraphs indexar. Quando Curadores sinalizam um subgraph, isto diz aos Indexadores que um subgraph está em demanda e tem qualidade suficiente para ser indexado. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Os Curadores trazem eficiência à Graph Network, e a [sinalização](#how-to-signal) é o processo que curadores usam para avisar aos Indexadores que um subgraph é bom para indexar. Os Indexadores podem confiar no sinal de um Curador, porque ao sinalizar, os Curadores mintam uma ação de curadoria para o subgraph, o que concede aos Curadores uma porção das futuras taxas de query movidas pelo subgraph. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Sinais de curador são representados como tokens ERC20 chamados de Ações de Curadoria do Graph (GCS). Quem quiser ganhar mais taxas de query devem sinalizar o seu GRT a subgraphs que apostam que gerará um fluxo forte de taxas á rede. 
Curadores não podem ser cortados por mau comportamento, mas há uma taxa de depósito em Curadores para desincentivar más decisões que possam ferir a integridade da rede. Curadores também ganharão menos taxas de query se curarem um subgraph de baixa qualidade, já que haverão menos queries a processar ou menos Indexadores para processá-las. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -O [Indexador de Atualização do Nascer do Sol](/sunrise/#what-is-the-upgrade-indexer) garante a indexação de todos os subgraphs; sinalizar GRT em um subgraph específico atrairá mais Indexadores a ele. Este incentivo para Indexadores através da curadoria visa melhorar a qualidade do serviço de queries através da redução de latência e do aprimoramento da disponibilidade de rede. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -Ao sinalizar, Curadores podem decidir entre sinalizar numa versão específica do subgraph ou sinalizar com a automigração. Caso sinalizem com a automigração, as ações de um curador sempre serão atualizadas à versão mais recente publicada pelo programador. Se decidirem sinalizar numa versão específica, as ações sempre permanecerão nesta versão específica. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Se precisar de ajuda com a curadoria para melhorar a qualidade do serviço, peça ajuda à equipa da Edge Node em support@thegraph.zendesk.com e especifique os subgraphs com que precisa de assistência. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Os indexadores podem achar subgraphs para indexar com base em sinais de curadoria que veem no Graph Explorer (imagem abaixo). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Subgraphs do Explorer](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## Como Sinalizar -Na aba "Curator" (Curador) do Graph Explorer, os curadores podem sinalizar e tirar sinal de certos subgraphs baseados nas estatísticas de rede. [Clique aqui](/subgraphs/explorer/) para um passo-a-passo deste processo no Graph Explorer. +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. 
For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -Um curador pode escolher sinalizar uma versão específica de subgraph, ou pode automaticamente migrar o seu sinal à versão mais recente desse subgraph. Ambas estratégias são válidas, e vêm com as suas próprias vantagens e desvantagens. +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Sinalizar numa versão específica serve muito mais quando um subgraph é usado por vários dApps. Um dApp pode precisar atualizar o subgraph regularmente com novos recursos; outro dApp pode preferir usar uma versão mais antiga, porém melhor testada. Na curadoria inicial, é incorrida uma taxa de 1%. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. Ter um sinal que migra automaticamente à build mais recente de um subgraph pode ser bom para garantir o acúmulo de taxas de consulta. Toda vez que cura, é incorrida uma taxa de 1% de curadoria. Também pagará uma taxa de 0.5% em toda migração. É recomendado que rogramadores de subgraphs evitem editar novas versões com frequência - eles devem pagar uma taxa de curadoria de 0.5% em todas as ações de curadoria auto-migradas. -> \*\*Nota: O primeiro endereço a sinalizar um subgraph particular é considerado o primeiro curador e deverá realizar tarefas muito mais intensivas em gas do que o resto dos curadores seguintes — porque o primeiro curador inicializa os tokens de ação de curadoria, inicializa o bonding curve, e também transfere tokens no proxy do Graph. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Como Sacar o Seu GRT @@ -40,39 +40,39 @@ Curadores têm a opção de sacar o seu GRT sinalizado a qualquer momento. Ao contrário do processo de delegação, se decidir sacar o seu GRT sinalizado, você não precisará esperar um período de recarga, e receberá a quantidade completa (menos a taxa de curadoria de 1%). -Quando um curador retira o seu sinal, Indexadores podem escolher continuar a indexar o subgraph, mesmo se não houver no momento nenhum GRT sinalizado. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -Porém, é recomendado que curadores deixem o seu GRT no lugar, não apenas para receber uma porção das taxas de query, mas também para garantir a confiança e disponibilidade do subgraph. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## Riscos 1. O mercado de consulta é jovem por natureza no The Graph, e há sempre o risco do seu rendimento anual ser menor que o esperado devido às dinâmicas nascentes do mercado. -2. Taxa de Curadoria - Quando um curador sinaliza GRT em um subgraph, ele incorre uma taxa de curadoria de 1%, que é queimada. -3. 
(Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. Um subgraph pode falhar devido a um erro de código. Um subgraph falho não acumula taxas de consulta. Portanto, espere até o programador consertar o erro e lançar uma nova versão. - - Caso se inscreva à versão mais recente de um subgraph, suas ações migrarão automaticamente a esta versão nova. Isto incorrerá uma taxa de curadoria de 0.5%. - - Se sinalizou em um subgraph específico e ele falhou, deverá queimar as suas ações de curadoria manualmente. Será então possível sinalizar na nova versão do subgraph, o que incorre uma taxa de curadoria de 1%. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## Perguntas Frequentes sobre Curadoria ### 1. Qual a % das taxas de query que os Curadores ganham? -Ao sinalizar em um subgraph, ganhará parte de todas as taxas de query geradas pelo subgraph. 10% de todas as taxas de curadoria vão aos Curadores, pro-rata às suas ações de curadoria. Estes 10% são sujeitos à governança. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. Como decidir quais subgraphs são de qualidade alta para sinalizar? +### 2. How do I decide which Subgraphs are high quality to signal on? -Achar subgraphs de alta qualidade é uma tarefa complexa, mas o processo pode ser abordado de várias formas diferentes. Como Curador, procure subgraphs confiáveis que movem volumes de query. Um subgraph confiável pode ser valioso se for completo, preciso, e apoiar as necessidades de dados de um dApp. Um subgraph mal arquitetado pode precisar de revisões ou reedições, além de correr risco de falhar. É importante que os Curadores verifiquem a arquitetura ou código de um subgraph, para averiguar se ele é valioso. 
Portanto: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Os curadores podem usar o seu conhecimento de uma rede para tentar adivinhar como um subgraph individual pode gerar um volume maior ou menor de queries no futuro -- Os curadores também devem entender as métricas disponíveis através do Graph Explorer. Métricas como o volume de queries passados e a identidade do programador do subgraph podem ajudar a determinar se um subgraph vale ou não o sinal. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. Qual o custo de atualizar um subgraph? +### 3. What’s the cost of updating a Subgraph? -Migrar as suas ações de curadoria a uma nova versão de subgraph incorre uma taxa de curadoria de 1%. Os curadores podem escolher se inscrever na versão mais nova de um subgraph. Quando ações de curadores são automigradas a uma nova versão, os Curadores também pagarão metade da taxa de curadoria, por ex., 0.5%, porque a atualização de subgraphs é uma ação on-chain que custa gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. Com que frequência posso atualizar o meu subgraph? +### 4. How often can I update my Subgraph? -Não atualize os seus subgraphs com frequência excessiva. Veja a questão acima para mais detalhes. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. Posso vender as minhas ações de curadoria? From 735484ec4a33024bd770f2050961a3e7b9588897 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:22 -0500 Subject: [PATCH 0506/1789] New translations curating.mdx (Russian) --- .../src/pages/ru/resources/roles/curating.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/ru/resources/roles/curating.mdx b/website/src/pages/ru/resources/roles/curating.mdx index ef319cda705e..1c2c64d7429c 100644 --- a/website/src/pages/ru/resources/roles/curating.mdx +++ b/website/src/pages/ru/resources/roles/curating.mdx @@ -2,87 +2,87 @@ title: Кураторство --- -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. 
The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. -## What Does Signaling Mean for The Graph Network? +## Что означает сигнализация для The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. 
This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -При подаче сигнала Кураторы могут решить подать сигнал на определенную версию субграфа или использовать автомиграцию. Если они подают сигнал с помощью автомиграции, доли куратора всегда будут обновляться до последней версии, опубликованной разработчиком. Если же они решат подать сигнал на определенную версию, доли всегда будут оставаться на этой конкретной версии. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## Как подавать Сигнал -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -Куратор может выбрать конкретную версию подграфа для сигнализации, или же он может выбрать автоматическую миграцию своего сигнала на самую новую рабочую сборку этого подграфа. Оба варианта являются допустимыми стратегиями и имеют свои плюсы и минусы. +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. 
Upon initial curation, a 1% standard tax is incurred. -Автоматическая миграция вашего сигнала на самую новую рабочую сборку может быть ценной, чтобы гарантировать непрерывное начисление комиссий за запросы. Каждый раз, когда вы осуществляете курирование, взимается комиссия в размере 1%. Вы также заплатите комиссию в размере 0,5% при каждой миграции. Разработчикам подграфов не рекомендуется часто публиковать новые версии - они должны заплатить комиссию на курирование в размере 0,5% на все автоматически мигрированные доли курации. +Автоматическая миграция вашего сигнала на самую новую рабочую сборку может быть ценной, чтобы гарантировать непрерывное начисление комиссий за запросы. Каждый раз, когда вы осуществляете курирование, взимается комиссия в размере 1%. Вы также заплатите комиссию в размере 0,5% при каждой миграции. Разработчикам субграфов не рекомендуется часто публиковать новые версии - они должны заплатить комиссию на курирование в размере 0,5% на все автоматически мигрированные доли курации. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. -## Withdrawing your GRT +## Вывод Вашего GRT -Curators have the option to withdraw their signaled GRT at any time. +Кураторы имеют возможность в любой момент отозвать свои заявленные GRT. -Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). +В отличие от процесса делегирования, если Вы решите отозвать заявленный Вами GRT, Вам не придется ждать периода размораживания и Вы получите всю сумму (за вычетом 1% налога на курирование). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## Риски 1. Рынок запросов в The Graph по своей сути молод, и существует риск того, что ваш %APY может оказаться ниже, чем вы ожидаете, из-за зарождающейся динамики рынка. -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. 
For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. Подграф может выйти из строя из-за ошибки. За неудавшийся подграф не начисляется плата за запрос. В результате вам придется ждать, пока разработчик исправит ошибку и выложит новую версию. - - Если вы подписаны на новейшую версию подграфа, ваши общие ресурсы автоматически перейдут на эту новую версию. При этом будет взиматься кураторская комиссия в размере 0,5%. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## Часто задаваемые вопросы по кураторству ### 1. Какой % от оплаты за запрос получают кураторы? -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. Как определить, какие подграфы являются высококачественными, чтобы подавать на них сигналы? +### 2. How do I decide which Subgraphs are high quality to signal on? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. 
It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. Какова стоимость обновления подграфа? +### 3. What’s the cost of updating a Subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. Как часто я могу обновлять свой подграф? +### 4. How often can I update my Subgraph? -Рекомендуется не обновлять свои подграфы слишком часто. См. выше для более подробной информации. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. Могу ли я продать свои кураторские доли? -Curation shares cannot be "bought" or "sold" like other ERC20 tokens that you may be familiar with. They can only be minted (created) or burned (destroyed). +Акции курирования нельзя «купить» или «продать», как другие токены ERC20, с которыми Вы, возможно, знакомы. Их можно только отчеканить (создать) или сжечь (уничтожить). -As a Curator on Arbitrum, you are guaranteed to get back the GRT you initially deposited (minus the tax). +Будучи куратором Arbitrum, вы гарантированно вернете первоначально внесенный вами GRT (за вычетом налога). -### 6. Am I eligible for a curation grant? +### 6. Имею ли я право на получение гранта на кураторство? -Curation grants are determined individually on a case-by-case basis. If you need assistance with curation, please send a request to support@thegraph.zendesk.com. +Гранты на кураторство определяются индивидуально в каждом конкретном случае. Если Вам нужна помощь с кураторством, отправьте запрос на support@thegraph.zendesk.com. Вы все еще в замешательстве? 
Ознакомьтесь с нашим видеоруководством по кураторству: From b561fa6a02d385990056b554befda39e0c761718 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:23 -0500 Subject: [PATCH 0507/1789] New translations curating.mdx (Swedish) --- .../src/pages/sv/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/sv/resources/roles/curating.mdx b/website/src/pages/sv/resources/roles/curating.mdx index fa6a279e5b1e..0ae08de7bc3a 100644 --- a/website/src/pages/sv/resources/roles/curating.mdx +++ b/website/src/pages/sv/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: Kuratering --- -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## What Does Signaling Mean for The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. 
Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## Hur man Signaliserar -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. 
For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -En kurator kan välja att signalera på en specifik subgrafversion, eller så kan de välja att ha sin signal automatiskt migrerad till den nyaste produktionsversionen av den subgrafen. Båda är giltiga strategier och har sina egna för- och nackdelar. +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. Att ha din signal automatiskt migrerad till den nyaste produktionsversionen kan vara värdefullt för att säkerställa att du fortsätter att ackumulera frågeavgifter. Varje gång du signalerar åläggs en kuratoravgift på 1%. Du kommer också att betala en kuratoravgift på 0,5% vid varje migration. Subgrafutvecklare uppmanas att inte publicera nya versioner för ofta - de måste betala en kuratoravgift på 0,5% på alla automatiskt migrerade kuratorandelar. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Withdrawing your GRT @@ -40,39 +40,39 @@ Curators have the option to withdraw their signaled GRT at any time. Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## Risker 1. Frågemarknaden är i grunden ung på The Graph och det finns en risk att din %APY kan vara lägre än du förväntar dig på grund av tidiga marknadsmekanik. -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. 
(Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. En subgraf kan misslyckas på grund av en bugg. En misslyckad subgraf genererar inte frågeavgifter. Som ett resultat måste du vänta tills utvecklaren rättar felet och distribuerar en ny version. - - Om du prenumererar på den nyaste versionen av en subgraf kommer dina andelar automatiskt att migreras till den nya versionen. Detta kommer att medföra en kuratoravgift på 0,5%. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## Kurations-FAQ ### 1. Vilken % av frågeavgifterna tjänar Kuratorer? -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. Hur bestämmer jag vilka subgrafer av hög kvalitet att signalera på? +### 2. How do I decide which Subgraphs are high quality to signal on? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. 
As a result: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. Vad kostar det att uppdatera en subgraf? +### 3. What’s the cost of updating a Subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. Hur ofta kan jag uppdatera min subgraf? +### 4. How often can I update my Subgraph? -Det föreslås att du inte uppdaterar dina subgrafer för ofta. Se frågan ovan för mer information. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. Kan jag sälja mina kuratorandelar? From 9c2af87dd0ace0432dac3b1d60bbaeff1065a5e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:24 -0500 Subject: [PATCH 0508/1789] New translations curating.mdx (Turkish) --- .../src/pages/tr/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/tr/resources/roles/curating.mdx b/website/src/pages/tr/resources/roles/curating.mdx index 33d63ae0f0bb..b12e73abac1f 100644 --- a/website/src/pages/tr/resources/roles/curating.mdx +++ b/website/src/pages/tr/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: Kürasyon --- -Küratörler The Graph'in merkeziyetsiz ekonomisi için kritik öneme sahiptir. Web3 ekosistemi hakkındaki bilgilerini kullanarak, The Graph Ağı tarafından endekslenmesi gereken subgraph’leri değerlendirir ve bunlara sinyal verirler. Küratörler Graph Gezgini aracılığıyla ağ verilerini inceleyerek sinyal verip vermeme kararını alır. The Graph Ağı, iyi kaliteye sahip subgraph’lere sinyal veren küratörleri, bu subgraph’lerin ürettiği sorgu ücretlerinden bir pay ile ödüllendirir. 
Sinyallenen GRT miktarı endeksleyiciler için hangi subgraph'leri endeksleyeceklerini belirlerken önemli bir faktördür. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## Sinyal Verme, The Graph Ağı için Ne Anlama Geliyor? -Bir subgraph'in tüketiciler tarafından sorgulanabilmesi için subgraph önce endekslenmelidir. İşte burada kürasyon devreye girer. Endeksleyicilerin kaliteli subgraph’lerden kayda değer sorgu ücretleri kazanabilmesi için hangi subgraph’leri endeksleyeceklerini bilmeleri gerekir. Küratörler bir subgraph’e sinyal verdiğinde bu, endeksleyicilere o subgraph’in talep gördüğünü ve yeterli kaliteye sahip olduğunu gösterir. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Küratörler, The Graph ağını verimli hale getirirler. [Sinyalleme](#how-to-signal), Küratörlerin Endeksleyicilere hangi subgraph'in endekslenmeye uygun olduğunu bildirmelerini sağlayan süreçtir. Endeksleyiciler, bir Küratörden gelen sinyale güvenebilir çünkü sinyalleme sırasında, Küratörler subgraph için bir kürasyon payı üretir. Bu da onları subgraph'in sağladığı gelecekteki sorgu ücretlerinin bir kısmına hak sahibi kılar. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Küratör sinyalleri, Graph Kürasyon Payları (Graph Curation Shares - GCS) olarak adlandırılan ERC20 token ile temsil edilir. Daha fazla sorgu ücreti kazanmak isteyenler, GRT’lerini ağ için güçlü bir ücret akışı yaratacağını öngördükleri subgraph’lere sinyal vermelidir. Küratörler kötü davranışları nedeniyle cezalandırılmaz (slashing uygulanmaz), ancak ağın bütünlüğüne zarar verebilecek kötü kararları caydırmak için bir depozito vergisi bulunur. Düşük kaliteli bir subgraph üzerinde kürasyon yapan Küratörler, daha az sorgu olduğu ya da daha az Endeksleyici tarafından işlendiği için daha az sorgu ücreti kazanır. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. 
-[Sunrise Yükseltme Endeksleyici](/archived/sunrise/#what-is-the-upgrade-indexer) tüm subgraph'lerin endekslenmesini sağlar. Belirli bir subgraph'e GRT sinyallenmesi o subgraph'e daha fazla endeksleyici çeker. Kürasyon yoluyla ek Endeksleyicilerin teşvik edilmesi, sorgu hizmetinin kalitesini artırmayı amaçlar ve ağ erişilebilirliğini artırarak gecikmeyi azaltır. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -Sinyal verirken, Küratörler belirli bir subgraph sürümüne sinyal vermeyi veya otomatik geçiş (auto-migrate) özelliğini kullanmayı seçebilirler. Eğer otomatik geçiş özelliğini kullanarak sinyal verirlerse, bir küratörün payları her zaman geliştirici tarafından yayımlanan en son sürüme göre güncellenir. Bunun yerine belirli bir sürüme sinyal vermeyi seçerlerse, paylar her zaman bu belirli sürümdeki haliyle kalır. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Hizmet kalitenizi artırmak için kürasyon konusunda yardıma ihtiyacınız varsa, lütfen Edge & Node ekibine support@thegraph.zendesk.com adresinden bir talep gönderin ve yardıma ihtiyacınız olan subgraph'leri belirtin. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Endeksleyiciler, Graph Gezgini'nde gördükleri kürasyon sinyallerine dayanarak endeksleyecekleri subgraph’leri bulabilirler (aşağıdaki ekran görüntüsüne bakın). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Gezgin subgraph'leri](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## Nasıl Sinyal Verilir -Graph Gezgini'ndeki Küratör sekmesi içinde, küratörler ağ istatistiklerine dayalı olarak belirli subgraph'lere sinyal verip kaldırabilecekler. Bunu Graph Gezgini'nde nasıl yapacağınıza dair adım adım bir genel bakış için, [buraya tıklayın.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -Bir küratör, belirli bir subgraph sürümü üzerinde sinyal vermeyi seçebilir veya sinyalinin otomatik olarak o subgraph'in en yeni üretim sürümüne taşınmasını tercih edebilir. Her iki strateji de geçerli olup kendi avantaj ve dezavantajlarına sahiptir. +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Belirli bir sürüme sinyal vermek, özellikle bir subgraph birden fazla dapp tarafından kullanıldığında faydalıdır. Bir dapp, subgraph'ini yeni özelliklerle düzenli olarak güncellemek isteyebilir. 
Diğer bir dapp ise daha eski, iyi test edilmiş bir subgraph sürümünü kullanmayı tercih edebilir. İlk kürasyon sırasında, %1'lik standart bir vergi alınır. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. Sinyalinizin otomatik olarak en yeni üretim sürümüne geçiş yapması, sorgu ücretlerini biriktirmeye devam etmenizi sağlamak açısından değerli olabilir. Her kürasyon yaptığınızda %1'lik bir kürasyon vergisi uygulanır. Ayrıca her geçişte %0,5'lik bir kürasyon vergisi ödersiniz. Subgraph geliştiricilerinin sık sık yeni sürümler yayımlaması teşvik edilmez - geliştiriciler otomatik olarak taşınan tüm kürasyon payları için %0,5 kürasyon vergisi ödemek zorundadırlar. -> **Not**: Belirli bir subgraph'e ilk kez sinyal veren adres ilk küratör olarak kabul edilir. Bu ilk sinyal işlemi, sonraki küratörlerinkine kıyasla çok daha fazla gaz tüketen bir işlemdir. Bunun nedeni, ilk küratörün kürasyon payı token'larını ilklendirmesi ve ayrıca token'ları The Graph proxy'sine aktarmasıdır. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## GRT'nizi Çekme @@ -40,39 +40,39 @@ Küratörler, sinyal verdikleri GRT'yi istedikleri zaman çekme seçeneğine sah Yetkilendirme sürecinden farklı olarak, sinyal verdiğiniz GRT'yi çekmeye karar verirseniz bir bekleme süresiyle karşılaşmazsınız ve (%1 kürasyon vergisi düşüldükten sonra) toplam miktarı alırsınız. -Bir küratör sinyalini çektikten sonra, endeksleyiciler aktif olarak sinyal verilmiş GRT olmasa bile subgraph'i endekslemeye devam etmeyi seçebilirler. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -Ancak, küratörlerin sinyal verdikleri GRT'yi yerinde bırakmaları tavsiye edilir; bu yalnızca sorgu ücretlerinden pay almak için değil, aynı zamanda subgraph'in güvenilirliğini ve kesintisiz çalışmasını sağlamak için de önemlidir. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## Riskler 1. The Graph üzerindeki sorgu pazarı henüz nispeten yenidir ve erken aşama piyasa dinamikleri nedeniyle %APY'nin beklediğinizden daha düşük olması riski mevcuttur. -2. Kürasyon Ücreti - Bir küratör bir subgraph'e GRT ile sinyal verdiğinde, %1'lik bir kürasyon vergisine tabi olur. Bu ücret yakılır. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. Bir subgraph, bir hata nedeniyle başarısız olabilir. Başarısız subgraph sorgu ücreti biriktirmez. 
Bu sebeple, geliştiricinin hatayı düzeltip yeni bir sürüm dağıtmasını beklemeniz gerekecektir. - - Eğer bir subgraph'in en yeni sürümüne aboneyseniz, paylarınız otomatik olarak o yeni sürüme geçecektir. Bu geçiş sırasında %0,5'lik bir kürasyon vergisi uygulanır. - - Belirli bir subgraph sürümüne sinyal verdiyseniz ve bu sürüm başarısız olduysa, kürasyon paylarınızı manuel olarak yakmanız gerekir. Daha sonra yeni subgraph sürümüne sinyal verebilirsiniz; bu işlem sırasında %1'lik bir kürasyon vergisi uygulanır. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## Kürasyon Hakkında SSS ### 1. Küratörler, sorgu ücretlerinin yüzde kaçını kazanır? -Bir subgraph'e sinyal vererek, subgraph'in ürettiği tüm sorgu ücretlerinden pay alırsınız. Tüm sorgu ücretlerinin %10'u, kürasyon paylarına orantılı olarak Küratörlere gider. Bu %10'luk oran yönetişime tabidir (yani yönetişim kararlarıyla değiştirilebilir). +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. Sinyal vereceğim subgraph'lerin hangilerinin yüksek kaliteli olduğunu nasıl belirlerim? +### 2. How do I decide which Subgraphs are high quality to signal on? -Yüksek kaliteli subgraph'leri bulmak karmaşık bir iştir. Ancak bu duruma farklı şekillerde yaklaşılabilir. Bir Küratör olarak, sorgu hacmi oluşturan güvenilir subgraph'ler aramak istersiniz. Güvenilir bir subgraph; tamamlanmış, doğru ve bir dapp’in veri ihtiyaçlarını destekliyorsa değerli olabilir. Kötü tasarlanmış bir subgraph'in revize edilmesi veya yeniden yayımlanması gerekebilir ve ileride hata alıp çalışmayı durdurabilir. Küratörler için bir subgraph'in değerli olup olmadığını değerlendirmek için subgraph'in mimarisini veya kodunu gözden geçirmesi önemlidir. Sonuç olarak: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. 
As a result: -- Küratörler, bir ağ hakkındaki bilgilerini kullanarak, belirli bir subgraph'in gelecekte daha yüksek veya daha düşük sorgu hacmi oluşturma olasılığını tahmin etmeye çalışabilirler. -- Küratörler Graph Gezgini üzerinden erişilebilen metrikleri de anlamalıdır. Geçmiş sorgu hacmi ve subgraph geliştiricisinin kim olduğu gibi metrikler, bir subgraph'in sinyal vermeye değer olup olmadığını belirlemekte yardımcı olabilir. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. Bir subgraph'ı güncellemenin maliyeti nedir? +### 3. What’s the cost of updating a Subgraph? -Kürasyon paylarınızı yeni bir subgraph sürümüne taşımak %1'lik bir kürasyon vergisine tabidir. Küratörler, bir subgraph'in en yeni sürümüne abone olmayı tercih edebilir. Küratör payları otomatik olarak yeni bir sürüme taşındığında, Küratörler ayrıca kürasyon vergisinin yarısını (yani %0,5) öderler. Çünkü subgraph'lerin yükseltilmesi, zincir üzerinde gerçekleşen ve dolayısıyla gaz harcamayı gerektiren bir eylemdir. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. Subgraph'ımı ne sıklıkla güncelleyebilirim? +### 4. How often can I update my Subgraph? -Subgraph'ınızı çok sık güncellememeniz önerilir. Daha fazla ayrıntı için yukarıdaki soruya bakın. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. Kürasyon paylarımı satabilir miyim? From e7cbaa905a3879d2218e7dc7208dc1d15387587e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:25 -0500 Subject: [PATCH 0509/1789] New translations curating.mdx (Ukrainian) --- .../src/pages/uk/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/uk/resources/roles/curating.mdx b/website/src/pages/uk/resources/roles/curating.mdx index 4304c7c138df..547fe31b6272 100644 --- a/website/src/pages/uk/resources/roles/curating.mdx +++ b/website/src/pages/uk/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: Кураторство --- -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. 
In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## What Does Signaling Mean for The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. 
This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## Як сигналізувати -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -Куратор може обрати подання сигналу на певну версію підграфа, або ж він може обрати автоматичне перенесення сигналу на найновішу версію цього підграфа. Обидва варіанти є прийнятними та мають свої плюси та мінуси. +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. Автоматичне переміщення вашого сигналу на найновішу версію може бути корисним для того, щоб ви продовжували нараховувати комісію за запити. Кожного разу, коли ви здійснюєте кураторську роботу, стягується плата за в розмірі 1%. Ви також сплачуєте 0,5% за кураторство, за кожну міграцію. 
Розробникам підграфів не рекомендується часто публікувати нові версії - вони повинні сплачувати 0.5% кураторам за всі автоматично переміщені частки кураторів. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Withdrawing your GRT @@ -40,39 +40,39 @@ Curators have the option to withdraw their signaled GRT at any time. Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## Ризики 1. Ринок запитів за своєю суттю молодий в Graph, і існує ризик того, що ваш %APY може бути нижчим, ніж ви очікуєте, через динаміку ринку, що зароджується. -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. Підграф може не працювати через різноманітні помилки (баги). Підграф, що не працює не стягує комісію за запити. В результаті вам доведеться почекати, поки розробник виправить усі помилки й випустить нову версію. - - Якщо ви підключені до найновішої версії підграфу, ваші частки будуть автоматично перенесені до цієї нової версії. При цьому буде стягуватися податок на в розмірі 0,5%. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. 
This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## Часті запитання про кураторство ### 1. Який % від комісії за запити отримують куратори? -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. Як вирішити, які підграфи є якісними для подачі сигналу? +### 2. How do I decide which Subgraphs are high quality to signal on? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. What’s the cost of updating a subgraph? +### 3. What’s the cost of updating a Subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. 
Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. How often can I update my subgraph? +### 4. How often can I update my Subgraph? -It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. Чи можу я продати свої частки куратора? From 5a36083608436e3f63e092627ecbd5823241222a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:27 -0500 Subject: [PATCH 0510/1789] New translations curating.mdx (Chinese Simplified) --- .../src/pages/zh/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/zh/resources/roles/curating.mdx b/website/src/pages/zh/resources/roles/curating.mdx index 54f4658473d7..e7a442fc4fa7 100644 --- a/website/src/pages/zh/resources/roles/curating.mdx +++ b/website/src/pages/zh/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: 策展 --- -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## What Does Signaling Mean for The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. 
-Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. 
+If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## 如何进行信号处理 -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -策展人可以选择在特定的子图版本上发出信号,或者他们可以选择让他们的策展份额自动迁移到该子图的最新生产版本。 这两种策略都是有效的,都有各自的优点和缺点。 +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. 让你的策展份额自动迁移到最新的生产构建,对确保你不断累积查询费用是有价值的。 每次你策展时,都会产生 1%的策展税。 每次迁移时,你也将支付 0.5%的策展税。 不鼓励子图开发人员频繁发布新版本--他们必须为所有自动迁移的策展份额支付 0.5%的策展税。 -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Withdrawing your GRT @@ -40,39 +40,39 @@ Curators have the option to withdraw their signaled GRT at any time. Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. 
+However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## 风险 1. 在Graph,查询市场本来就很年轻,由于市场动态刚刚开始,你的年收益率可能低于你的预期,这是有风险的。 -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. 一个子图可能由于错误而失败。 一个失败的子图不会累积查询费用。 因此,你必须等待,直到开发人员修复错误并部署一个新的版本。 - - 如果你订阅了一个子图的最新版本,你的份额将自动迁移到该新版本。 这将产生 0.5%的策展税。 - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## 策展常见问题 ### 1. 策展人能赚取多少百分比的查询费? -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. 如何决定哪些子图是高质量的信号? +### 2. How do I decide which Subgraphs are high quality to signal on? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. 
As a result: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. 升级一个子图的成本是多少? +### 3. What’s the cost of updating a Subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. 我可以多频繁的升级子图? +### 4. How often can I update my Subgraph? -建议你不要太频繁地升级子图。 更多细节请见上面的问题。 +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. 我可以出售我的策展份额吗? From ff02ff7042e0d48e12e79907807825d8b42a2a0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:28 -0500 Subject: [PATCH 0511/1789] New translations curating.mdx (Urdu (Pakistan)) --- .../src/pages/ur/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/ur/resources/roles/curating.mdx b/website/src/pages/ur/resources/roles/curating.mdx index 9e972e55ab7f..c5138d8482d2 100644 --- a/website/src/pages/ur/resources/roles/curating.mdx +++ b/website/src/pages/ur/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: کیورٹنگ --- -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Curators are critical to The Graph's decentralized economy. 
They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## What Does Signaling Mean for The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. 
+The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## سگنل کرنے کا طریقہ -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -ایک کیوریٹر مخصوص سب گراف ورژن پر سگنل دینے کا انتخاب کر سکتا ہے، یا وہ اپنے سگنل کو خود بخود اس سب گراف کی جدید ترین پروڈکشن بلڈ میں منتقل کرنے کا انتخاب کر سکتا ہے۔ دونوں درست حکمت عملی ہیں اور ان کے اپنے فوائد اور نقصانات کے ساتھ آتے ہیں. +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. 
آپ کے سگنل کو خود بخود جدید ترین پروڈکشن کی تعمیر میں منتقل کرنا اس بات کو یقینی بنانے کے لیے قابل قدر ہو سکتا ہے کہ آپ کیوری کی فیس جمع کرتے رہیں۔ جب بھی آپ کیوریشن کرتے ہیں، 1% کیوریشن ٹیکس لاگو ہوتا ہے۔ آپ ہر دفعہ منتقلی پر 0.5% کا کیوریشن ٹیکس ادا کریں گے. سب گراف ڈویلپرز کو نئے ورژنز کثرت سے شائع کرنے کی حوصلہ شکنی کی جاتی ہے - انہیں تمام خود کار طریقے سے منتقل کیوریشن شیئرز پر 0.5% کیوریشن ٹیکس ادا کرنا پڑتا ہے. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Withdrawing your GRT @@ -40,39 +40,39 @@ Curators have the option to withdraw their signaled GRT at any time. Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## خطرات 1. گراف میں کیوری کی مارکیٹ فطری طور پر جوان ہے اور اس بات کا خطرہ ہے کہ آپ کا %APY مارکیٹ کی نئی حرکیات کی وجہ سے آپ کی توقع سے کم ہو سکتا ہے. -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. ایک سب گراف ایک بگ کی وجہ سے ناکام ہو سکتا ہے. ایک ناکام سب گراف کیوری کی فیس جمع نہیں کرتا ہے. اس کے نتیجے میں،آپ کو انتظار کرنا پڑے گاجب تک کہ ڈویلپر اس بگ کو کو ٹھیک نہیں کرتا اور نیا ورژن تعینات کرتا ہے. - - اگر آپ نےسب گراف کے نۓ ورژن کو سبسکرائب کیا ہے. آپ کے حصص خود بخود اس نئے ورژن میں منتقل ہو جائیں گے۔ اس پر 0.5% کیوریشن ٹیکس لگے گا. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. 
(Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## کیوریشن کے اکثر پوچھے گئے سوالات ### 1. کیوریٹرز کتنی % کیوری فیس کماتے ہیں؟ -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. میں یہ کیسے طے کروں کہ کون سے سب گرافس اعلیٰ معیار کے ہیں؟ +### 2. How do I decide which Subgraphs are high quality to signal on? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. 
سب گراف کو اپ ڈیٹ کرنے کی کیا قیمت ہے؟ +### 3. What’s the cost of updating a Subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. میں اپنے سب گراف کو کتنی بار اپ گریڈ کر سکتا ہوں؟ +### 4. How often can I update my Subgraph? -یہ تجویز کی جاتی ہے کہ آپ اپنے سب گراف کو کثرت سے اپ گریڈ نہ کریں۔ مزید تفصیلات کو لیے اوپر والا سوال دیکھیں۔ +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. کیا میں اپنے کیوریشن شیئرز بیچ سکتا ہوں؟ From 95e4f418f8c6457a7721434a248abbb05f6f8afa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:29 -0500 Subject: [PATCH 0512/1789] New translations curating.mdx (Vietnamese) --- .../src/pages/vi/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/vi/resources/roles/curating.mdx b/website/src/pages/vi/resources/roles/curating.mdx index e1633707faf3..06aa7b62b93f 100644 --- a/website/src/pages/vi/resources/roles/curating.mdx +++ b/website/src/pages/vi/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: Curating --- -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## What Does Signaling Mean for The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. 
When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. 
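The tax figures quoted in this file (1% on every fresh signal, 0.5% each time shares auto-migrate to a new version, and 1% again when manually re-signaling on a specific version) can be sanity-checked with a minimal sketch. The helper names and example amounts below are illustrative only, and the sketch deliberately ignores bonding-curve price effects by treating the signaled amount as constant.

```ts
// Minimal sketch of the curation-tax arithmetic described in this page.
// Rates come from the docs text; everything else is an illustrative assumption.

const SIGNAL_TAX = 0.01;        // 1% curation tax on every new signal
const AUTO_MIGRATE_TAX = 0.005; // 0.5% tax each time shares auto-migrate

/** GRT lost to tax when signaling once and auto-migrating across `versions` upgrades. */
function autoMigrateCost(grtSignaled: number, versions: number): number {
  return grtSignaled * SIGNAL_TAX + grtSignaled * AUTO_MIGRATE_TAX * versions;
}

/** GRT lost to tax when burning and re-signaling manually on each new version instead. */
function manualResignalCost(grtSignaled: number, versions: number): number {
  return grtSignaled * SIGNAL_TAX * (versions + 1);
}

// Example: 10,000 GRT signaled on a Subgraph that later publishes 3 new versions.
console.log(autoMigrateCost(10_000, 3));    // 100 + 150 = 250 GRT in tax
console.log(manualResignalCost(10_000, 3)); // 400 GRT in tax
```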
-If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## Làm thế nào để phát tín hiệu -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -Curator có thể chọn phát tín hiệu trên một phiên bản subgraph cụ thể hoặc họ có thể chọn để tín hiệu của họ tự động chuyển sang bản dựng sản xuất mới nhất của subgraph đó. Cả hai đều là những chiến lược hợp lệ và đi kèm với những ưu và nhược điểm của riêng chúng. +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. Having your signal automatically migrate to the newest production build can be valuable to ensure you keep accruing query fees. Every time you curate, a 1% curation tax is incurred. You will also pay a 0.5% curation tax on every migration. Subgraph developers are discouraged from frequently publishing new versions - they have to pay a 0.5% curation tax on all auto-migrated curation shares. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Withdrawing your GRT @@ -40,39 +40,39 @@ Curators have the option to withdraw their signaled GRT at any time. 
Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## Những rủi ro 1. The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. Một subgraph có thể thất bại do một lỗi. Một subgraph thất bại không tích lũy phí truy vấn. Do đó, bạn sẽ phải đợi cho đến khi nhà phát triển sửa lỗi và triển khai phiên bản mới. - - Nếu bạn đã đăng ký phiên bản mới nhất của một subgraph, các cổ phần của bạn sẽ tự động chuyển sang phiên bản mới đó. Điều này sẽ phát sinh một khoản thuế curation 0.5%. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## Câu hỏi thường gặp về Curation ### 1. Curator kiếm được bao nhiêu % phí truy vấn? 
-By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. Làm cách nào để tôi quyết định xem các subgraph nào có chất lượng cao để báo hiệu? +### 2. How do I decide which Subgraphs are high quality to signal on? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. What’s the cost of updating a subgraph? +### 3. What’s the cost of updating a Subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. How often can I update my subgraph? +### 4. How often can I update my Subgraph? -It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. Tôi có thể bán cổ phần curation của mình không? 
From 78837b773ab490f8db9095b2537c2febe5cc430e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:30 -0500 Subject: [PATCH 0513/1789] New translations curating.mdx (Marathi) --- .../src/pages/mr/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/mr/resources/roles/curating.mdx b/website/src/pages/mr/resources/roles/curating.mdx index 2d504102644e..4c73d5b33d31 100644 --- a/website/src/pages/mr/resources/roles/curating.mdx +++ b/website/src/pages/mr/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: क्युरेटिंग --- -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## What Does Signaling Mean for The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. 
Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## सिग्नल कसे करावे -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. 
For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -क्युरेटर विशिष्ट सबग्राफ आवृत्तीवर सिग्नल करणे निवडू शकतो किंवा ते त्यांचे सिग्नल त्या सबग्राफच्या नवीनतम उत्पादन बिल्डमध्ये स्वयंचलितपणे स्थलांतरित करणे निवडू शकतात. दोन्ही वैध धोरणे आहेत आणि त्यांच्या स्वतःच्या साधक आणि बाधकांसह येतात. +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. तुमचा सिग्नल नवीनतम प्रोडक्शन बिल्डवर आपोआप स्थलांतरित होणे हे तुम्ही क्वेरी फी जमा करत असल्याचे सुनिश्चित करण्यासाठी मौल्यवान असू शकते. प्रत्येक वेळी तुम्ही क्युरेट करता तेव्हा 1% क्युरेशन कर लागतो. तुम्ही प्रत्येक स्थलांतरावर 0.5% क्युरेशन कर देखील द्याल. सबग्राफ विकसकांना वारंवार नवीन आवृत्त्या प्रकाशित करण्यापासून परावृत्त केले जाते - त्यांना सर्व स्वयं-स्थलांतरित क्युरेशन शेअर्सवर 0.5% क्युरेशन कर भरावा लागतो. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Withdrawing your GRT @@ -40,39 +40,39 @@ Curators have the option to withdraw their signaled GRT at any time. Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## जोखीम 1. द ग्राफमध्ये क्वेरी मार्केट मूळतः तरुण आहे आणि नवीन मार्केट डायनॅमिक्समुळे तुमचा %APY तुमच्या अपेक्षेपेक्षा कमी असण्याचा धोका आहे. -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. 
Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. बगमुळे सबग्राफ अयशस्वी होऊ शकतो. अयशस्वी सबग्राफ क्वेरी शुल्क जमा करत नाही. परिणामी, विकसक बगचे निराकरण करेपर्यंत आणि नवीन आवृत्ती तैनात करेपर्यंत तुम्हाला प्रतीक्षा करावी लागेल. - - तुम्ही सबग्राफच्या नवीनतम आवृत्तीचे सदस्यत्व घेतले असल्यास, तुमचे शेअर्स त्या नवीन आवृत्तीमध्ये स्वयंचलितपणे स्थलांतरित होतील. यावर 0.5% क्युरेशन कर लागेल. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## क्युरेशन FAQs ### 1. क्युरेटर्स किती % क्वेरी फी मिळवतात? -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. कोणते सबग्राफ उच्च दर्जाचे आहेत हे मी कसे ठरवू? +### 2. How do I decide which Subgraphs are high quality to signal on? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. 
A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. What’s the cost of updating a subgraph? +### 3. What’s the cost of updating a Subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. How often can I update my subgraph? +### 4. How often can I update my Subgraph? -It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. मी माझे क्युरेशन शेअर्स विकू शकतो का? From 9a8bf703f58ce1f110ac768d31a4285e98f5fedc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:31 -0500 Subject: [PATCH 0514/1789] New translations curating.mdx (Hindi) --- .../src/pages/hi/resources/roles/curating.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/hi/resources/roles/curating.mdx b/website/src/pages/hi/resources/roles/curating.mdx index 3d50ad907083..0ef4b09e70ec 100644 --- a/website/src/pages/hi/resources/roles/curating.mdx +++ b/website/src/pages/hi/resources/roles/curating.mdx @@ -2,37 +2,37 @@ title: क्यूरेटिंग --- -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Curators are critical to The Graph's decentralized economy. 
They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. ## What Does Signaling Mean for The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. -Curators The Graph network को कुशल बनाते हैं और [संकेत देना](#how-to-signal) वह प्रक्रिया है जिसका उपयोग Curators यह बताने के लिए करते हैं कि कौन सा subgraph Indexer के लिए अच्छा है। Indexers Curator से आने वाले संकेत पर भरोसा कर सकते हैं क्योंकि संकेत देना के दौरान, Curators subgraph के लिए एक curation share मिंट करते हैं, जो उन्हें उस subgraph द्वारा उत्पन्न भविष्य के पूछताछ शुल्क के एक हिस्से का हकदार बनाता है। +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. 
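As a rough illustration of the fee entitlement described here, the page's own figure of 10% of a Subgraph's query fees going to Curators, pro-rata to their curation shares (GCS), can be sketched as follows. The function and the example numbers are assumptions for illustration, not the protocol contract logic.

```ts
// Sketch of a pro-rata curator fee split, using the 10% figure stated in this page.
// Names and example values are illustrative.

const CURATOR_FEE_SHARE = 0.10; // 10% of query fees go to Curators (subject to governance)

function curatorFeeCut(
  totalQueryFeesGrt: number,
  curatorShares: number,
  totalShares: number,
): number {
  const curatorPool = totalQueryFeesGrt * CURATOR_FEE_SHARE;
  return curatorPool * (curatorShares / totalShares);
}

// Example: a Subgraph earns 5,000 GRT in query fees; you hold 200 of 1,000 GCS.
console.log(curatorFeeCut(5_000, 200, 1_000)); // 500 * 0.2 = 100 GRT
```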
-[Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) यह सुनिश्चित करता है कि सभी सबग्राफ को index किया जाए। किसी विशेष subgraph पर GRT को संकेत करने से अधिक indexers उस पर आकर्षित होते हैं। curation के माध्यम से अतिरिक्त Indexers को प्रोत्साहित करना queries की सेवा की गुणवत्ता को बढ़ाने के लिए है, जिससे latency कम हो और नेटवर्क उपलब्धता में सुधार हो। +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -यदि आपको सेवा की गुणवत्ता बढ़ाने के लिए curation में सहायता की आवश्यकता हो, तो कृपया एज और नोड टीम को support@thegraph.zendesk.com पर अनुरोध भेजें और उन सबग्राफ को निर्दिष्ट करें जिनमें आपको सहायता चाहिए। +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer सबग्राफ](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) ## सिग्नल कैसे करें -Graph Explorer के Curator टैब में, curators नेटवर्क स्टैट्स के आधार पर कुछ सबग्राफ पर signal और unsignal कर सकेंगे। Graph Explorer में यह कैसे करना है, इसका चरण-दर-चरण अवलोकन पाने के लिए [यहाँ क्लिक करें](/subgraphs/explorer/)। +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) -एक क्यूरेटर एक विशिष्ट सबग्राफ संस्करण पर संकेत देना चुन सकता है, या वे अपने सिग्नल को स्वचालित रूप से उस सबग्राफ के नवीनतम उत्पादन निर्माण में माइग्रेट करना चुन सकते हैं। दोनों मान्य रणनीतियाँ हैं और अपने स्वयं के पेशेवरों और विपक्षों के साथ आती हैं। +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. 
-विशेष संस्करण पर संकेत देना विशेष रूप से उपयोगी होता है जब एक subgraph को कई dapp द्वारा उपयोग किया जाता है। एक dapp को नियमित रूप से subgraph को नई विशेषता के साथ अपडेट करने की आवश्यकता हो सकती है। दूसरी dapp एक पुराना, अच्छी तरह से परीक्षण किया हुआ उपग्राफ subgraph संस्करण उपयोग करना पसंद कर सकती है। प्रारंभिक क्यूरेशन curation पर, 1% मानक कर tax लिया जाता है। +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. अपने सिग्नल को स्वचालित रूप से नवीनतम उत्पादन बिल्ड में माइग्रेट करना यह सुनिश्चित करने के लिए मूल्यवान हो सकता है कि आप क्वेरी शुल्क अर्जित करते रहें। हर बार जब आप क्यूरेट करते हैं, तो 1% क्यूरेशन टैक्स लगता है। आप हर माइग्रेशन पर 0.5% क्यूरेशन टैक्स भी देंगे। सबग्राफ डेवलपर्स को बार-बार नए संस्करण प्रकाशित करने से हतोत्साहित किया जाता है - उन्हें सभी ऑटो-माइग्रेटेड क्यूरेशन शेयरों पर 0.5% क्यूरेशन टैक्स देना पड़ता है। -> **नोट**पहला पता जो किसी विशेष subgraph को सिग्नल करता है, उसे पहला curator माना जाएगा और उसे बाकी आने वाले curators की तुलना में अधिक गैस-इंटेंसिव कार्य करना होगा क्योंकि पहला curator curation share टोकन को इनिशियलाइज़ करता है और टोकन को The Graph प्रॉक्सी में ट्रांसफर करता है। +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. ## Withdrawing your GRT @@ -40,39 +40,39 @@ Curators have the option to withdraw their signaled GRT at any time. Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. ## जोखिम 1. क्वेरी बाजार द ग्राफ में स्वाभाविक रूप से युवा है और इसमें जोखिम है कि नवजात बाजार की गतिशीलता के कारण आपका %APY आपकी अपेक्षा से कम हो सकता है। -2. क्यूरेशन शुल्क - जब कोई क्यूरेटर किसी सबग्राफ़ पर GRT सिग्नल करता है, तो उसे 1% क्यूरेशन टैक्स देना होता है। यह शुल्क जला दिया जाता है। -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). -4. 
बग के कारण सबग्राफ विफल हो सकता है। एक विफल सबग्राफ क्वेरी शुल्क अर्जित नहीं करता है। नतीजतन, आपको तब तक इंतजार करना होगा जब तक कि डेवलपर बग को ठीक नहीं करता है और एक नया संस्करण तैनात करता है। - - यदि आपने सबग्राफ के नवीनतम संस्करण की सदस्यता ली है, तो आपके शेयर उस नए संस्करण में स्वत: माइग्रेट हो जाएंगे। इस पर 0.5% क्यूरेशन टैक्स लगेगा। - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. ## अवधि पूछे जाने वाले प्रश्न ### 1. क्यूरेटर क्वेरी फीस का कितना % कमाते हैं? -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. मैं यह कैसे तय करूं कि कौन से सबग्राफ सिग्नल देने के लिए उच्च गुणवत्ता वाले हैं? +### 2. How do I decide which Subgraphs are high quality to signal on? -उच्च-गुणवत्ता वाले सबग्राफ खोजना एक जटिल कार्य है, लेकिन इसे कई अलग-अलग तरीकों से किया जा सकता है। एक Curator के रूप में, आपको उन भरोसेमंद सबग्राफ को देखना चाहिए जो query volume को बढ़ा रहे हैं। एक भरोसेमंद subgraph मूल्यवान हो सकता है यदि वह पूर्ण, सटीक हो और किसी dapp की डेटा आवश्यकताओं को पूरा करता हो। एक खराब डिज़ाइन किया गया subgraph संशोधित या पुनः प्रकाशित करने की आवश्यकता हो सकती है और अंततः असफल भी हो सकता है। यह Curators के लिए अत्यंत महत्वपूर्ण है कि वे किसी subgraph की संरचना या कोड की समीक्षा करें ताकि यह आकलन कर सकें कि subgraph मूल्यवान है या नहीं। +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. 
As a result: -- क्यूरेटर नेटवर्क की अपनी समझ का उपयोग करके यह अनुमान लगाने की कोशिश कर सकते हैं कि भविष्य में कोई विशेष सबग्राफ़ अधिक या कम क्वेरी वॉल्यूम कैसे उत्पन्न कर सकता है। -- क्यूरेटर को Graph Explorer के माध्यम से उपलब्ध मेट्रिक्स को भी समझना चाहिए। जैसे कि पिछले क्वेरी वॉल्यूम और सबग्राफ़ डेवलपर कौन है, ये मेट्रिक्स यह तय करने में मदद कर सकते हैं कि किसी सबग्राफ़ पर सिग्नलिंग करना उचित है या नहीं। +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. -### 3. What’s the cost of updating a subgraph? +### 3. What’s the cost of updating a Subgraph? -नए subgraph संस्करण में अपनी curation shares को माइग्रेट करने पर 1% curation टैक्स लगता है। Curators नए subgraph संस्करण को सब्सक्राइब करने का विकल्प चुन सकते हैं। जब curator shares अपने आप नए संस्करण में माइग्रेट होती हैं, तो Curators को आधा curation टैक्स, यानी 0.5%, देना पड़ता है क्योंकि सबग्राफ को अपग्रेड करना एक ऑनचेन क्रिया है जो गैस खर्च करती है। +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading Subgraphs is an onchain action that costs gas. -### 4. How often can I update my subgraph? +### 4. How often can I update my Subgraph? -It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. ### 5. क्या मैं अपने क्यूरेशन शेयर बेच सकता हूँ? From 4fbc1de709401b619fdd8a825fd064c3eb202953 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:32 -0500 Subject: [PATCH 0515/1789] New translations curating.mdx (Swahili) --- .../src/pages/sw/resources/roles/curating.mdx | 89 +++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 website/src/pages/sw/resources/roles/curating.mdx diff --git a/website/src/pages/sw/resources/roles/curating.mdx b/website/src/pages/sw/resources/roles/curating.mdx new file mode 100644 index 000000000000..a228ebfb3267 --- /dev/null +++ b/website/src/pages/sw/resources/roles/curating.mdx @@ -0,0 +1,89 @@ +--- +title: Curating +--- + +Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the Subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality Subgraphs with a share of the query fees those Subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which Subgraphs to index. + +## What Does Signaling Mean for The Graph Network? + +Before consumers can query a Subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality Subgraphs, they need to know what Subgraphs to index. When Curators signal on a Subgraph, it lets Indexers know that a Subgraph is in demand and of sufficient quality that it should be indexed. 
+ +Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a Subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the Subgraph, entitling them to a portion of future query fees that the Subgraph drives. + +Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to Subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality Subgraph because there will be fewer queries to process or fewer Indexers to process them. + +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. + +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. + +If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the Subgraphs you need assistance with. + +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). + +![Explorer Subgraphs](/img/explorer-subgraphs.png) + +## How to Signal + +Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain Subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) + +A curator can choose to signal on a specific Subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that Subgraph. Both are valid strategies and come with their own pros and cons. + +Signaling on a specific version is especially useful when one Subgraph is used by multiple dapps. One dapp might need to regularly update the Subgraph with new features. Another dapp might prefer to use an older, well-tested Subgraph version. Upon initial curation, a 1% standard tax is incurred. + +Having your signal automatically migrate to the newest production build can be valuable to ensure you keep accruing query fees. Every time you curate, a 1% curation tax is incurred. You will also pay a 0.5% curation tax on every migration. Subgraph developers are discouraged from frequently publishing new versions - they have to pay a 0.5% curation tax on all auto-migrated curation shares. + +> **Note**: The first address to signal a particular Subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. 
+ +## Withdrawing your GRT + +Curators have the option to withdraw their signaled GRT at any time. + +Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). + +Once a curator withdraws their signal, indexers may choose to keep indexing the Subgraph, even if there's currently no active GRT signaled. + +However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the Subgraph. + +## Risks + +1. The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. +2. Curation Fee - when a curator signals GRT on a Subgraph, they incur a 1% curation tax. This fee is burned. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their Subgraph or if a Subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. A Subgraph can fail due to a bug. A failed Subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a Subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific Subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new Subgraph version, thus incurring a 1% curation tax. + +## Curation FAQs + +### 1. What % of query fees do Curators earn? + +By signalling on a Subgraph, you will earn a share of all the query fees that the Subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. + +### 2. How do I decide which Subgraphs are high quality to signal on? + +Finding high-quality Subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy Subgraphs that are driving query volume. A trustworthy Subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected Subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a Subgraph’s architecture or code in order to assess if a Subgraph is valuable. As a result: + +- Curators can use their understanding of a network to try and predict how an individual Subgraph may generate a higher or lower query volume in the future +- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the Subgraph developer is can help determine whether or not a Subgraph is worth signalling on. + +### 3. What’s the cost of updating a Subgraph? + +Migrating your curation shares to a new Subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a Subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 
0.5%, because upgrading Subgraphs is an onchain action that costs gas. + +### 4. How often can I update my Subgraph? + +It’s suggested that you don’t update your Subgraphs too frequently. See the question above for more details. + +### 5. Can I sell my curation shares? + +Curation shares cannot be "bought" or "sold" like other ERC20 tokens that you may be familiar with. They can only be minted (created) or burned (destroyed). + +As a Curator on Arbitrum, you are guaranteed to get back the GRT you initially deposited (minus the tax). + +### 6. Am I eligible for a curation grant? + +Curation grants are determined individually on a case-by-case basis. If you need assistance with curation, please send a request to support@thegraph.zendesk.com. + +Still confused? Check out our Curation video guide below: + + From 569c22666aeccff8dd831870264816220adc4b37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:33 -0500 Subject: [PATCH 0516/1789] New translations tokenomics.mdx (Romanian) --- website/src/pages/ro/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/ro/resources/tokenomics.mdx b/website/src/pages/ro/resources/tokenomics.mdx index 4a9b42ca6e0d..dac3383a28e7 100644 --- a/website/src/pages/ro/resources/tokenomics.mdx +++ b/website/src/pages/ro/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network is incentivized by powerful tokenomics. Here’s ## Overview -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Specifics @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. Curators - Find the best subgraphs for Indexers +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. Indexers - Backbone of blockchain data @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. 
For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### Creating a subgraph +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### Querying an existing subgraph +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. 
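As a minimal sketch of that querying flow: the gateway URL shape, API key, Subgraph ID, and the `tokens` entity below are placeholders rather than values taken from this page; the exact query endpoint for a given Subgraph is shown in Subgraph Studio.

```typescript
// A minimal sketch of querying a published Subgraph through the gateway.
// All identifiers here are placeholders: use the query URL and API key that
// Subgraph Studio shows for your own Subgraph, and an entity from its schema.
const API_KEY = "YOUR_API_KEY";
const SUBGRAPH_ID = "YOUR_SUBGRAPH_ID";
const endpoint = `https://gateway.thegraph.com/api/${API_KEY}/subgraphs/id/${SUBGRAPH_ID}`;

async function main(): Promise<void> {
  const response = await fetch(endpoint, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    // `tokens` is a stand-in entity; replace it with one defined in your Subgraph.
    body: JSON.stringify({ query: "{ tokens(first: 5) { id } }" }),
  });
  const { data, errors } = await response.json();
  console.log(data ?? errors);
}

main().catch(console.error);
```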
Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. 
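As a back-of-the-envelope sketch of that delegation cap: the stake and delegation figures below are hypothetical, and only the 100,000 GRT minimum self-stake and the 16x ratio come from the text above.

```typescript
// Hypothetical figures illustrating the 16x delegation cap described above.
// Only the 100,000 GRT minimum self-stake and the 16x ratio come from the text.
const selfStake = 100_000;            // GRT the Indexer has self-staked
const delegationCap = selfStake * 16; // 1,600,000 GRT of delegation can be put to work
const delegated = 2_000_000;          // GRT actually delegated to this Indexer

const usableDelegation = Math.min(delegated, delegationCap);   // 1,600,000 GRT
const idleDelegation = Math.max(0, delegated - delegationCap); // 400,000 GRT unused until self-stake grows

console.log({ delegationCap, usableDelegation, idleDelegation });
```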
The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. ![Total burned GRT](/img/total-burned-grt.jpeg) From 12f2dc8c79eeba9e0bfff7a9510217d53a4cd4bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:34 -0500 Subject: [PATCH 0517/1789] New translations tokenomics.mdx (French) --- website/src/pages/fr/resources/tokenomics.mdx | 84 +++++++++---------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/website/src/pages/fr/resources/tokenomics.mdx b/website/src/pages/fr/resources/tokenomics.mdx index 27bbbee1af4d..7568b69ebd35 100644 --- a/website/src/pages/fr/resources/tokenomics.mdx +++ b/website/src/pages/fr/resources/tokenomics.mdx @@ -1,103 +1,103 @@ --- title: Les tokenomiques du réseau The Graph sidebarTitle: Tokenomics -description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. +description: The Graph Network est encouragé par une puissante tokénomic. Voici comment fonctionne GRT, le jeton d'utilité de travail natif de The Graph. --- ## Aperçu -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. 
## Spécificités⁠ -The Graph's model is akin to a B2B2C model, but it's driven by a decentralized network where participants collaborate to provide data to end users in exchange for GRT rewards. GRT is the utility token for The Graph. It coordinates and incentivizes the interaction between data providers and consumers within the network. +Le modèle de The Graph s'apparente à un modèle B2B2C, mais il est piloté par un réseau décentralisé où les participants collaborent pour fournir des données aux utilisateurs finaux en échange de récompenses GRT. GRT est le jeton d'utilité de The Graph. Il coordonne et encourage l'interaction entre les fournisseurs de données et les consommateurs au sein du réseau. -The Graph plays a vital role in making blockchain data more accessible and supports a marketplace for its exchange. To learn more about The Graph's pay-for-what-you-need model, check out its [free and growth plans](/subgraphs/billing/). +The Graph joue un rôle essentiel en rendant les données de la blockchain plus accessibles et en soutenant une marketplace pour leur échange. Pour en savoir plus sur le modèle de facturation de The Graph, consultez ses [plans gratuits et de croissance](/subgraphs/billing/). -- GRT Token Address on Mainnet: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) +- Adresse du jeton GRT sur le réseau principal : [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) -- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +- Adresse du jeton GRT sur Arbitrum One : [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) ## Les rôles des participants au réseau -There are four primary network participants: +Les participants au réseau sont au nombre de quatre : -1. Delegators - Delegate GRT to Indexers & secure the network +1. Délégateurs - Délèguent des GRT aux Indexeurs & sécurisent le réseau -2. Curateurs - Trouver les meilleurs subgraphs pour les indexeurs +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. Indexeurs - épine dorsale des données de la blockchain -Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). +Les Fishermen et les arbitres font également partie intégrante du succès du réseau grâce à d'autres contributions, soutenant le travail des autres participants principaux. Pour plus d'informations sur les rôles du réseau, [lire cet article](https://thegraph.com/blog/the-graph-grt-token-economics/). -![Tokenomics diagram](/img/updated-tokenomics-image.png) +![Diagramme de la tokenomic](/img/updated-tokenomics-image.png) -## Delegators (Passively earn GRT) +## Délégateurs (gagnent passivement des GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. 
+Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. -For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. +Par exemple, si un Délégateur délègue 15 000 GRT à un Indexeur offrant 10 %, le Délégateur recevra environ 1 500 GRT de récompenses par an. -There is a 0.5% delegation tax which is burned whenever a Delegator delegates GRT on the network. If a Delegator chooses to withdraw their delegated GRT, the Delegator must wait for the 28-epoch unbonding period. Each epoch is 6,646 blocks, which means 28 epochs ends up being approximately 26 days. +Une taxe de délégation de 0,5 % est prélevée chaque fois qu'un Délégateur délègue des GRT sur le réseau. Si un Délégateur choisit de retirer les GRT qu'il a délégués, il doit attendre la période de déverrouillage de 28 époques. Chaque époque compte 6 646 blocs, ce qui signifie que 28 époques représentent environ 26 jours. -If you're reading this, you're capable of becoming a Delegator right now by heading to the [network participants page](https://thegraph.com/explorer/participants/indexers), and delegating GRT to an Indexer of your choice. +Si vous lisez ceci, vous pouvez devenir Délégateur dès maintenant en vous rendant sur la [page des participants au réseau](https://thegraph.com/explorer/participants/indexers), et en déléguant des GRT à un Indexeur de votre choix. ## Curateurs (Gagnez des GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. -## Developers +## Développeurs -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. 
Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### Création d'un subgraph +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### Interroger un subgraph existant +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. -Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. +Les subgraph sont [interrogés à l'aide de GraphQL](/subgraphs/querying/introduction/), et les frais d'interrogation sont payés avec des GRT dans [Subgraph Studio](https://thegraph.com/studio/). Les frais d'interrogation sont distribués aux participants au réseau en fonction de leur contribution au protocole. -1% of the query fees paid to the network are burned. +1% des frais de requête payés au réseau sont brûlés. -## Indexers (Earn GRT) +## Indexeurs (gagner des GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Les Indexeurs peuvent gagner des récompenses en GRT de deux façons : -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. 
These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. -In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. +Pour faire fonctionner un nœud d'indexation, les Indexeurs doivent staker 100 000 GRT ou plus avec le réseau. Les Indexeurs sont incités à s'approprier des GRT proportionnellement au nombre de requêtes qu'ils traitent. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. -The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. +Le montant des récompenses reçues par un Indexeur peut varier en fonction du self-stake de l'indexeur, de la délégation acceptée, de la qualité du service et de nombreux autres facteurs. ## Token Supply : Incinération & Emission -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. 
+The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. -![Total burned GRT](/img/total-burned-grt.jpeg) +![Total de GRT brûlés](/img/total-burned-grt.jpeg) -In addition to these regularly occurring burning activities, the GRT token also has a slashing mechanism in place to penalize malicious or irresponsible behavior by Indexers. If an Indexer is slashed, 50% of their indexing rewards for the epoch are burned (while the other half goes to the fisherman), and their self-stake is slashed by 2.5%, with half of this amount being burned. This helps to ensure that Indexers have a strong incentive to act in the best interests of the network and to contribute to its security and stability. +En plus de ces activités d'incinération régulières, le jeton GRT dispose également d'un mécanisme de réduction (slashing) pour pénaliser les comportements malveillants ou irresponsables des Indexeurs. Lorsqu'un Indexeur est sanctionné, 50 % de ses récompenses d'indexation pour l'époque sont brûlées (l'autre moitié est versée au fisherman), et sa participation personnelle est réduite de 2,5 %, la moitié de ce montant étant brûlée. Les Indexeurs sont ainsi fortement incités à agir dans l'intérêt du réseau et à contribuer à sa sécurité et à sa stabilité. ## Amélioration du protocole -The Graph Network is ever-evolving and improvements to the economic design of the protocol are constantly being made to provide the best experience for all network participants. The Graph Council oversees protocol changes and community members are encouraged to participate. Get involved with protocol improvements in [The Graph Forum](https://forum.thegraph.com/). +The Graph Network est en constante évolution et des améliorations sont constamment apportées à la conception économique du protocole afin d'offrir la meilleure expérience possible à tous les participants au réseau. The Graph Council supervise les modifications du protocole et les membres de la communauté sont encouragés à y participer. Participez aux améliorations du protocole sur [le Forum The Graph](https://forum.thegraph.com/). From 8fa1cea6dc593ca385161c3df46c48e29069f044 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:35 -0500 Subject: [PATCH 0518/1789] New translations tokenomics.mdx (Spanish) --- website/src/pages/es/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/es/resources/tokenomics.mdx b/website/src/pages/es/resources/tokenomics.mdx index cd30274637ea..a15d15155fd5 100644 --- a/website/src/pages/es/resources/tokenomics.mdx +++ b/website/src/pages/es/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network is incentivized by powerful tokenomics. Here’s ## Descripción -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. 
+The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Specifics @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. Curadores - Encuentran los mejores subgrafos para los Indexadores +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. Indexadores: Son la columna vertebral de los datos de la blockchain @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. 
Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### Creación de un subgrafo +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### Consulta de un subgrafo existente +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. 
These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. 
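For a rough sense of scale, the sketch below applies those burn rates to hypothetical amounts; the delegated, signaled, and query-fee figures are made up for illustration.

```typescript
// Hypothetical amounts run through the burn rates listed above.
const DELEGATION_TAX = 0.005; // 0.5% burned when GRT is delegated
const CURATION_TAX = 0.01;    // 1% burned when GRT is signaled on a Subgraph
const QUERY_FEE_BURN = 0.01;  // 1% of query fees burned

const delegated = 15_000; // GRT delegated to an Indexer
const signaled = 3_000;   // GRT signaled by a Curator
const queryFees = 10_000; // GRT paid in query fees

const burned =
  delegated * DELEGATION_TAX + // 75 GRT
  signaled * CURATION_TAX +    // 30 GRT
  queryFees * QUERY_FEE_BURN;  // 100 GRT

console.log(`GRT burned in this example: ${burned}`); // 205 GRT
```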
![Total burned GRT](/img/total-burned-grt.jpeg) From 7a6cb93431dc6a4e156179d5252a27c4b960795a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:36 -0500 Subject: [PATCH 0519/1789] New translations tokenomics.mdx (Arabic) --- website/src/pages/ar/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/ar/resources/tokenomics.mdx b/website/src/pages/ar/resources/tokenomics.mdx index 511af057534f..fa0f098b22c8 100644 --- a/website/src/pages/ar/resources/tokenomics.mdx +++ b/website/src/pages/ar/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network is incentivized by powerful tokenomics. Here’s ## نظره عامة -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Specifics @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. المنسقون (Curators) - يبحثون عن أفضل subgraphs للمفهرسين +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. المفهرسون (Indexers) - العمود الفقري لبيانات blockchain @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. 
+Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### إنشاء subgraph +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### الاستعلام عن subgraph موجود +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. 
They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. 
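As a rough sketch of what that issuance target means for the headline supply figure, assuming the 3% is applied as a simple annual rate on the running supply and ignoring burning entirely:

```typescript
// A simple projection of the 3% issuance target, ignoring burning entirely.
// In the protocol, issuance is distributed per Subgraph according to curation signal;
// this sketch only tracks the total supply.
const issuanceRate = 0.03;   // 3% per year
let supply = 10_000_000_000; // 10 billion GRT initial supply

for (let year = 1; year <= 3; year++) {
  const newlyIssued = supply * issuanceRate; // GRT minted for Indexer rewards that year
  supply += newlyIssued;
  console.log(
    `Year ${year}: ~${(newlyIssued / 1e6).toFixed(0)}M GRT issued, total ~${(supply / 1e9).toFixed(2)}B GRT`
  );
}
```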
-The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. ![Total burned GRT](/img/total-burned-grt.jpeg) From ee324a11c6ddf382787caffd2ef458a8203749e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:37 -0500 Subject: [PATCH 0520/1789] New translations tokenomics.mdx (Czech) --- website/src/pages/cs/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/cs/resources/tokenomics.mdx b/website/src/pages/cs/resources/tokenomics.mdx index 92b1514574b4..66eefd5b8b1a 100644 --- a/website/src/pages/cs/resources/tokenomics.mdx +++ b/website/src/pages/cs/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network is incentivized by powerful tokenomics. Here’s ## Přehled -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Specifics @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. Kurátoři - nalezení nejlepších podgrafů pro indexátory +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. Indexery - páteř blockchainových dat @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. 
Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### Vytvoření podgrafu +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### Dotazování na existující podgraf +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. 
+Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. 
If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. ![Total burned GRT](/img/total-burned-grt.jpeg) From 7f8846ca1eab74011efcdcbbbca6afca51eb8361 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:38 -0500 Subject: [PATCH 0521/1789] New translations tokenomics.mdx (German) --- website/src/pages/de/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/de/resources/tokenomics.mdx b/website/src/pages/de/resources/tokenomics.mdx index 3dd13eb7d06a..cfed7fc5c416 100644 --- a/website/src/pages/de/resources/tokenomics.mdx +++ b/website/src/pages/de/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network is incentivized by powerful tokenomics. Here’s ## Überblick -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Besonderheiten @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. 
Kuratoren - Finden Sie die besten Untergraphen für Indexer +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. Indexer - Das Rückgrat der Blockchain-Daten @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### Erstellung eines Untergraphen +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. 
Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### Abfrage eines vorhandenen Untergraphen +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. 
That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. ![Total burned GRT](/img/total-burned-grt.jpeg) From a5126042eed104d0d647d9aeecec6c93e4337783 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:39 -0500 Subject: [PATCH 0522/1789] New translations tokenomics.mdx (Italian) --- website/src/pages/it/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/it/resources/tokenomics.mdx b/website/src/pages/it/resources/tokenomics.mdx index c342b803f911..c869fcb1a9da 100644 --- a/website/src/pages/it/resources/tokenomics.mdx +++ b/website/src/pages/it/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network is incentivized by powerful tokenomics. 
Here’s ## Panoramica -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Specifics @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. Curator - Trovare i migliori subgraph per gli Indexer +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. Indexer - Struttura portante dei dati della blockchain @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. 
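To make the curation-tax arithmetic above concrete, here is a minimal sketch in Python. It assumes the 1% tax is deducted from the GRT a Curator deposits when signalling on a new Subgraph, and it reuses the 3,000 GRT figure suggested for Subgraph developers; the function and variable names are illustrative only, not part of the protocol contracts.

```python
# Illustrative sketch of the curation-tax burn described above.
# Assumption: the 1% tax is taken out of the deposited GRT before the
# remainder counts as curation signal. Rate and the 3,000 GRT figure
# come from the surrounding text; everything else is hypothetical.

CURATION_TAX = 0.01  # 1% of the deposited GRT, burned


def curate(deposit_grt: float) -> tuple[float, float]:
    """Return (GRT burned as tax, GRT left as curation signal)."""
    burned = deposit_grt * CURATION_TAX
    return burned, deposit_grt - burned


burned, signalled = curate(3_000)  # suggested minimum for Subgraph developers
print(f"burned: {burned} GRT, signalled: {signalled} GRT")
# burned: 30.0 GRT, signalled: 2970.0 GRT
```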
+Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### Creare un subgraph +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### Eseguire query di un subgraph esistente +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. 
These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. 
These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. ![Total burned GRT](/img/total-burned-grt.jpeg) From 00216a57bdbf5e16cf5917c0a9f369464e013a8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:41 -0500 Subject: [PATCH 0523/1789] New translations tokenomics.mdx (Japanese) --- website/src/pages/ja/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/ja/resources/tokenomics.mdx b/website/src/pages/ja/resources/tokenomics.mdx index 07a04a43b06c..a1f30147507d 100644 --- a/website/src/pages/ja/resources/tokenomics.mdx +++ b/website/src/pages/ja/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network is incentivized by powerful tokenomics. Here’s ## 概要 -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Specifics @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. キュレーター - インデクサーのために最適なサブグラフを見つける。 +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. インデクサー - ブロックチェーンデータのバックボーン @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. 
+Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### サブグラフの作成 +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### 既存のサブグラフのクエリ +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. 
Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -The Graph is designed with multiple burning mechanisms to offset new token issuance. 
Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. ![Total burned GRT](/img/total-burned-grt.jpeg) From 8c0820fcc62c38a4cf5e91e240c07c63a3c30f2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:42 -0500 Subject: [PATCH 0524/1789] New translations tokenomics.mdx (Korean) --- website/src/pages/ko/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/ko/resources/tokenomics.mdx b/website/src/pages/ko/resources/tokenomics.mdx index 4a9b42ca6e0d..dac3383a28e7 100644 --- a/website/src/pages/ko/resources/tokenomics.mdx +++ b/website/src/pages/ko/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network is incentivized by powerful tokenomics. Here’s ## Overview -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Specifics @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. Curators - Find the best subgraphs for Indexers +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. Indexers - Backbone of blockchain data @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. 
Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### Creating a subgraph +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### Querying an existing subgraph +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. 
Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. 
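The delegation limits described above reduce to simple arithmetic: an Indexer's usable delegation is capped at 16 times its self-stake, and a Delegator's annual reward is roughly the delegated amount times the Indexer's effective rate. The sketch below works through the numbers quoted in the text (100,000 GRT minimum self-stake, the 16x cap, and the 15k GRT at ~10% example); the flat-rate reward model and all names are illustrative assumptions, since actual rewards also depend on query fees, issuance, and the Indexer's own parameters.

```python
# Minimal sketch of the delegation figures quoted above; the flat-rate
# model and the names are assumptions for illustration, not protocol code.

MIN_SELF_STAKE = 100_000   # GRT an Indexer must self-stake to run a node
MAX_DELEGATION_RATIO = 16  # delegation is usable up to 16x the self-stake


def usable_delegation(self_stake: float, delegated: float) -> float:
    """GRT of delegation the Indexer can actually allocate; the rest sits idle."""
    return min(delegated, self_stake * MAX_DELEGATION_RATIO)


def yearly_delegator_rewards(delegated: float, effective_rate: float) -> float:
    """Rewards at a flat effective rate (most Indexers offer ~9-12% per the text)."""
    return delegated * effective_rate


print(usable_delegation(MIN_SELF_STAKE, 2_000_000))  # 1,600,000 GRT usable; 400,000 idle
print(yearly_delegator_rewards(15_000, 0.10))        # ~1,500 GRT, matching the example above
```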
The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. ![Total burned GRT](/img/total-burned-grt.jpeg) From 3ed09fc3f2234bfb2199f08dcbddcb699a57fa20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:43 -0500 Subject: [PATCH 0525/1789] New translations tokenomics.mdx (Dutch) --- website/src/pages/nl/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/nl/resources/tokenomics.mdx b/website/src/pages/nl/resources/tokenomics.mdx index 4a9b42ca6e0d..dac3383a28e7 100644 --- a/website/src/pages/nl/resources/tokenomics.mdx +++ b/website/src/pages/nl/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network is incentivized by powerful tokenomics. Here’s ## Overview -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Specifics @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. Curators - Find the best subgraphs for Indexers +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. 
Indexers - Backbone of blockchain data @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### Creating a subgraph +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. 
Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### Querying an existing subgraph +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. 
Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. ![Total burned GRT](/img/total-burned-grt.jpeg) From efae8f87eb38a6ff52a8048bdef7dfc3fcc8eb10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:44 -0500 Subject: [PATCH 0526/1789] New translations tokenomics.mdx (Polish) --- website/src/pages/pl/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/pl/resources/tokenomics.mdx b/website/src/pages/pl/resources/tokenomics.mdx index 4a9b42ca6e0d..dac3383a28e7 100644 --- a/website/src/pages/pl/resources/tokenomics.mdx +++ b/website/src/pages/pl/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network is incentivized by powerful tokenomics. Here’s ## Overview -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. 
Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Specifics @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. Curators - Find the best subgraphs for Indexers +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. Indexers - Backbone of blockchain data @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. 
Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### Creating a subgraph +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### Querying an existing subgraph +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. 
**Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. 
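The patched passage above pairs a 3% annual issuance target with roughly 1% of the GRT supply burned per year. As a quick sanity check on those figures, here is a minimal TypeScript sketch that projects total supply under the simplifying assumption that both rates stay constant and compound annually; the constants mirror the numbers in the text, and the flat-rate model is an illustration, not protocol behavior.

```ts
// Back-of-the-envelope model of the supply dynamics described in the passage:
// ~3% target issuance per year vs. ~1% of supply burned per year.
// Treating both as flat annual rates is a simplifying assumption.

const INITIAL_SUPPLY_GRT = 10_000_000_000; // 10 billion GRT initial supply
const ISSUANCE_RATE = 0.03;                // ~3% new issuance per year (target)
const BURN_RATE = 0.01;                    // ~1% of supply burned per year (approximate)

/** Projects total GRT supply after `years`, assuming both rates stay constant. */
function projectSupply(years: number): number {
  let supply = INITIAL_SUPPLY_GRT;
  for (let i = 0; i < years; i++) {
    supply += supply * ISSUANCE_RATE; // indexing rewards minted to Indexers
    supply -= supply * BURN_RATE;     // delegation tax, curation tax, query-fee burn, etc.
  }
  return supply;
}

console.log(projectSupply(1).toLocaleString()); // 10,197,000,000 GRT (~2% net growth)
console.log(projectSupply(5).toLocaleString()); // ≈ 11.0 billion GRT after 5 years at these rates
```

Under these assumptions the net effect is roughly 2% supply growth per year, which is why the passage stresses that burning only partially offsets issuance.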
![Total burned GRT](/img/total-burned-grt.jpeg) From 2b64743781cbb0bb03da5c1b85f9729c7dd60652 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:45 -0500 Subject: [PATCH 0527/1789] New translations tokenomics.mdx (Portuguese) --- website/src/pages/pt/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/pt/resources/tokenomics.mdx b/website/src/pages/pt/resources/tokenomics.mdx index f5994ac88795..5126fa077fec 100644 --- a/website/src/pages/pt/resources/tokenomics.mdx +++ b/website/src/pages/pt/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: A Graph Network é incentivada por uma tokenomia (economia de token ## Visão geral -O The Graph é um protocolo descentralizado que permite acesso fácil a dados de blockchain. Ele indexa dados de blockchain da mesma forma que o Google indexa a web; se já usou um dApp (aplicativo descentralizado) que resgata dados de um subgraph, você provavelmente já interagiu com o The Graph. Hoje, milhares de [dApps populares](https://thegraph.com/explorer) no ecossistema da Web3 usam o The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Especificações @@ -24,9 +24,9 @@ Há quatro participantes primários na rede: 1. Delegantes — Delegam GRT aos Indexadores e protegem a rede -2. Curadores — Encontram os melhores subgraphs para Indexadores +2. Curators - Find the best Subgraphs for Indexers -3. Programadores — Constroem e consultam subgraphs em queries +3. Developers - Build & query Subgraphs 4. Indexadores — Rede de transporte de dados em blockchain @@ -36,7 +36,7 @@ Pescadores e Árbitros também são integrais ao êxito da rede através de outr ## Delegantes (Ganham GRT passivamente) -Os Delegantes delegam GRT a Indexadores, aumentando o stake do Indexador em subgraphs na rede. Em troca, os Delegantes ganham uma porcentagem de todas as taxas de query e recompensas de indexação do Indexador. Cada Indexador determina a porção que será recompensada aos Delegantes de forma independente, criando competição entre Indexadores para atrair Delegantes. Muitos Indexadores oferecem entre 9 e 12% ao ano. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. Por exemplo, se um Delegante delegasse 15.000 GRT a um Indexador que oferecesse 10%, o Delegante receberia cerca de 1.500 GRT em recompensas por ano. @@ -46,25 +46,25 @@ Quem ler isto pode tornar-se um Delegante agora mesmo na [página de participant ## Curadores (Ganham GRT) -Os Curadores identificam subgraphs de alta qualidade e os "curam" (por ex., sinalizam GRT neles) para ganhar ações de curadoria, que garantem uma porção de todas as taxas de query futuras geradas pelo subgraph. 
Enquanto qualquer participante independente da rede pode ser um Curador, os programadores de subgraphs tendem a ser os primeiros Curadores dos seus próprios subgraphs, pois querem garantir que o seu subgraph seja indexado. +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Desde 11 de abril de 2024, os programadores de subgraphs podem curar o seu subgraph com, no mínimo, 3.000 GRT. Porém, este número pode ser impactado pela atividade na rede e participação na comunidade. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Os Curadores pagam uma taxa de curadoria de 1% ao curar um subgraph novo. Esta taxa de curadoria é queimada, de modo a reduzir a reserva de GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Programadores -Os programadores constroem e fazem queries em subgraphs para retirar dados da blockchain. Como os subgraphs têm o código aberto, os programadores podem carregar dados da blockchain em seus dApps com queries nos subgraphs existentes. Os programadores pagam por queries feitos em GRT, que é distribuído aos participantes da rede. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### Como criar um Subgraph +### Creating a Subgraph -Para indexar dados na blockchain, os programadores podem [criar um subgraph](]/developing/creating-a-subgraph/) — um conjunto de instruções para Indexadores sobre quais dados devem ser servidos aos consumidores. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Depois que os programadores tiverem criado e testado o seu subgraph, eles poderão [editá-lo](/subgraphs/developing/publishing/publishing-a-subgraph/) na rede descentralizada do The Graph. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### Como fazer queries um Subgraph existente +### Querying an existing Subgraph -Depois que um subgraph for [editado](/subgraphs/developing/publishing/publishing-a-subgraph/) na rede descentralizada do The Graph, qualquer um poderá criar uma chave API, depositar GRT no seu saldo de cobrança, e consultar o subgraph em um query. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. Os Subgraphs [recebem queries pelo GraphQL](/subgraphs/querying/introduction/), e as taxas de query são pagas em GRT no [Subgraph Studio](https://thegraph.com/studio/). As taxas de query são distribuídas a participantes da rede com base nas suas contribuições ao protocolo. 
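The passage above notes that a published Subgraph is queried over GraphQL with an API key, with fees settled in GRT. As a hedged illustration only, a minimal TypeScript client might look like the sketch below; the gateway URL pattern, the Subgraph ID placeholder, and the `tokens` entity are assumptions, and any real query must match the target Subgraph's own schema.

```ts
// Minimal sketch of querying a published Subgraph over GraphQL with an API key.
// The endpoint pattern, the Subgraph ID, and the `tokens` entity are placeholders
// for illustration; adapt them to the Subgraph you actually want to query.

const API_KEY = "<your-api-key>";    // created in Subgraph Studio
const SUBGRAPH_ID = "<subgraph-id>"; // the published Subgraph to query

const endpoint = `https://gateway.thegraph.com/api/${API_KEY}/subgraphs/id/${SUBGRAPH_ID}`;

const query = /* GraphQL */ `
  {
    tokens(first: 5) {
      id
      symbol
    }
  }
`;

async function main(): Promise<void> {
  const res = await fetch(endpoint, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query }),
  });
  const { data, errors } = await res.json();
  if (errors) throw new Error(JSON.stringify(errors));
  console.log(data); // query fees for this request are paid in GRT and distributed to network participants
}

main().catch(console.error);
```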
@@ -72,27 +72,27 @@ Os Subgraphs [recebem queries pelo GraphQL](/subgraphs/querying/introduction/), ## Indexadores (Ganham GRT) -Os Indexadores são o núcleo do The Graph: operam o equipamento e o software independentes que movem a rede descentralizada do The Graph. Eles servem dados a consumidores baseado em instruções de subgraphs. +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Os Indexadores podem ganhar recompensas em GRT de duas maneiras: -1. **Taxas de query**: GRT pago, por programadores ou utilizadores, para queries de dados de subgraph. Taxas de query são distribuídas diretamente a Indexadores conforme a função de rebate exponencial (veja o GIP [aqui](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Recompensas de indexação**: a emissão anual de 3% é distribuída aos Indexadores com base no número de subgraphs que indexam. Estas recompensas os incentivam a indexar subgraphs, às vezes antes das taxas de query começarem, de modo a acumular e enviar Provas de Indexação (POIs) que verificam que indexaram dados corretamente. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Cada subgraph recebe uma porção da emissão total do token na rede, com base na quantia do sinal de curadoria do subgraph. Essa quantia é então recompensada aos Indexadores com base no seu stake alocado no subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. Para executar um node de indexação, os Indexadores devem fazer um stake de 100.000 GRT ou mais com a rede. Os mesmos são incentivados a fazer um stake de GRT, proporcional à quantidade de queries que servem. -Os Indexadores podem aumentar suas alocações de GRT nos subgraphs ao aceitar delegações de GRT de Delegantes; também podem aceitar até 16 vezes a quantia do seu stake inicial. Se um Indexador se tornar "excessivamente delegado" (por ex., com seu stake inicial multiplicado mais de 16 vezes), ele não poderá usar o GRT adicional dos Delegantes até aumentar o seu próprio stake na rede. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. A quantidade de recompensas recebidas por um Indexador pode variar com base no seu auto-stake, delegação aceita, qualidade de serviço, e muito mais fatores. 
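To make the delegation mechanics described above concrete, the following TypeScript sketch encodes the 16× delegation cap, the 100,000 GRT minimum self-stake, and the "15,000 GRT at 10%" example from the passage. The function names and the flat annual rate are simplifying assumptions for illustration, not how rewards are actually computed on-chain.

```ts
// Illustrative sketch of two rules from the surrounding passage:
// (1) an Indexer's usable delegation is capped at 16x its self-stake, and
// (2) a Delegator's yearly reward is roughly delegation x the rate the Indexer offers.

const MAX_DELEGATION_RATIO = 16;    // cap described in the passage
const MIN_SELF_STAKE_GRT = 100_000; // minimum self-stake to run an indexing node

/** GRT of delegation an Indexer can actually use, given its self-stake. */
function usableDelegation(selfStakeGrt: number, delegatedGrt: number): number {
  if (selfStakeGrt < MIN_SELF_STAKE_GRT) return 0; // not eligible to index
  return Math.min(delegatedGrt, selfStakeGrt * MAX_DELEGATION_RATIO);
}

/** Rough annual reward for a Delegator at the rate an Indexer advertises. */
function estimateDelegatorReward(delegatedGrt: number, annualRate: number): number {
  return delegatedGrt * annualRate;
}

console.log(usableDelegation(100_000, 2_000_000));  // 1,600,000 — the excess is "over-delegation"
console.log(estimateDelegatorReward(15_000, 0.10)); // 1,500 GRT, matching the example in the text
```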
## Reserva de Tokens: Queima e Emissão -A reserva inicial de tokens é de 10 bilhões de GRT, com um alvo de emissão de 3% novos ao ano para recompensar os Indexadores por alocar stake em subgraphs. Portanto, a reserva total de tokens GRT aumentará por 3% a cada ano à medida que tokens novos são emitidos para Indexadores, pela sua contribuição à rede. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -O The Graph é projetado com vários mecanismos de queima para compensar pela emissão de novos tokens. Aproximadamente 1% da reserva de GRT é queimado todo ano, através de várias atividades na rede, e este número só aumenta conforme a atividade na rede cresce. Estas atividades de queima incluem: uma taxa de delegação de 0,5% sempre que um Delegante delega GRT a um Indexador; uma taxa de curadoria de 1% quando Curadores sinalizam em um subgraph; e 1% de taxas de query por dados de blockchain. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. [Total de GRT Queimado](/img/total-burned-grt.jpeg) From e6e64cc5967122de6a0b2d37328c214f9502e846 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:46 -0500 Subject: [PATCH 0528/1789] New translations tokenomics.mdx (Russian) --- website/src/pages/ru/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/ru/resources/tokenomics.mdx b/website/src/pages/ru/resources/tokenomics.mdx index e4ab88d45844..1fbe485101b2 100644 --- a/website/src/pages/ru/resources/tokenomics.mdx +++ b/website/src/pages/ru/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network is incentivized by powerful tokenomics. Here’s ## Обзор -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Специфические особенности @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. Кураторы - Ищут лучшие субграфы для Индексаторов +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. 
Индексаторы - Магистральный канал передачи данных блокчейна @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### Создание субграфа +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. 
Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### Запрос к существующему субграфу +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. 
Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. ![Total burned GRT](/img/total-burned-grt.jpeg) From bb7618a9b20a25e04445dbc78780934866fab855 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:47 -0500 Subject: [PATCH 0529/1789] New translations tokenomics.mdx (Swedish) --- website/src/pages/sv/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/sv/resources/tokenomics.mdx b/website/src/pages/sv/resources/tokenomics.mdx index 3d6c4666a960..120c43db7ee1 100644 --- a/website/src/pages/sv/resources/tokenomics.mdx +++ b/website/src/pages/sv/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network is incentivized by powerful tokenomics. Here’s ## Översikt -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. 
Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Specifics @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. Kuratorer - Hitta de bästa subgrafterna för Indexers +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. Indexers - Grundvalen för blockkedjedata @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. 
Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### Skapa en subgraf +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### Fråga en befintlig subgraf +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. 
**Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. 
![Total burned GRT](/img/total-burned-grt.jpeg) From 2092231e6fa1904e172af2d6353d48fec8a94363 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:48 -0500 Subject: [PATCH 0530/1789] New translations tokenomics.mdx (Turkish) --- website/src/pages/tr/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/tr/resources/tokenomics.mdx b/website/src/pages/tr/resources/tokenomics.mdx index ff09d144619c..61192bdf1366 100644 --- a/website/src/pages/tr/resources/tokenomics.mdx +++ b/website/src/pages/tr/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network is incentivized by powerful tokenomics. Here’s ## Genel Bakış -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Ayrıntılar @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. Curators - Find the best subgraphs for Indexers +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. Indexers - Backbone of blockchain data @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. 
+Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### Subgraph oluşturma +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### Mevcut bir subgraph'ı sorgulama +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. 
They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. 
-The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. ![Total burned GRT](/img/total-burned-grt.jpeg) From 58412729093f6b9353f66048f6e41264cf710b9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:49 -0500 Subject: [PATCH 0531/1789] New translations tokenomics.mdx (Ukrainian) --- website/src/pages/uk/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/uk/resources/tokenomics.mdx b/website/src/pages/uk/resources/tokenomics.mdx index 709ebb3b40c0..0c58cbf44968 100644 --- a/website/src/pages/uk/resources/tokenomics.mdx +++ b/website/src/pages/uk/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network is incentivized by powerful tokenomics. Here’s ## Overview -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Specifics @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. Куратори - знаходять найкращі підграфи для індексаторів +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. Індексатори - кістяк блокчейн-даних @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. 
Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### Creating a subgraph +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### Querying an existing subgraph +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. 
+Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. 
If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. ![Total burned GRT](/img/total-burned-grt.jpeg) From 8ec23a43a72960c4ed4cac4697cdd12da073680f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:50 -0500 Subject: [PATCH 0532/1789] New translations tokenomics.mdx (Chinese Simplified) --- website/src/pages/zh/resources/tokenomics.mdx | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/zh/resources/tokenomics.mdx b/website/src/pages/zh/resources/tokenomics.mdx index c9062327aa5d..1bf6f61b9539 100644 --- a/website/src/pages/zh/resources/tokenomics.mdx +++ b/website/src/pages/zh/resources/tokenomics.mdx @@ -4,9 +4,9 @@ sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- -## 概述 +## Overview -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Specifics @@ -24,9 +24,9 @@ There are four primary network participants: 1. 
Delegators - Delegate GRT to Indexers & secure the network -2. 策展人-为索引人找到最佳子图 +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. 索引人-区块链数据的主干 @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## 开发人员 -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### 创建子图 +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. 
+Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### 查询现存子图 +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. 
In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. ![Total burned GRT](/img/total-burned-grt.jpeg) From be4b7ab47e8461e048868d2437bdb2bd9338c6e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:51 -0500 Subject: [PATCH 0533/1789] New translations tokenomics.mdx (Urdu (Pakistan)) --- website/src/pages/ur/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/ur/resources/tokenomics.mdx b/website/src/pages/ur/resources/tokenomics.mdx index 269dfc583951..92304bab81c9 100644 --- a/website/src/pages/ur/resources/tokenomics.mdx +++ b/website/src/pages/ur/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network is incentivized by powerful tokenomics. Here’s ## جائزہ -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. 
If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Specifics @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. کیوریٹرز - انڈیکسرز کے لیے بہترین سب گراف تلاش کریں +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. انڈیکسرز - بلاکچین ڈیٹا کی ریڑھ کی ہڈی @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. 
Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### سب گراف بنائیں +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### موجودہ سب گراف کو کیوری کریں +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. 
**Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. 
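Putting the issuance and burn figures above together gives a rough sense of the net supply change. The sketch below assumes a flat 3% issuance and a flat ~1% burn, which is a simplification of the mechanisms described in the text; the function name is made up for this example.

```typescript
// Back-of-the-envelope net supply change using the figures in the text:
// ~3% annual issuance to Indexers, offset by roughly 1% of supply burned per year.
// A simplification for illustration only.

function netSupplyAfterOneYear(
  currentSupply: number,
  issuanceRate = 0.03, // target new issuance
  burnRate = 0.01,     // approximate annual burn from taxes and query fees
): number {
  const issued = currentSupply * issuanceRate;
  const burned = currentSupply * burnRate;
  return currentSupply + issued - burned;
}

// Starting from the initial 10 billion GRT supply:
console.log(netSupplyAfterOneYear(10_000_000_000)); // 10,200,000,000 (~+2% net)
```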
![Total burned GRT](/img/total-burned-grt.jpeg) From e1d47e8c4df1624d1baa3a4f807cd3fb46d02d89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:52 -0500 Subject: [PATCH 0534/1789] New translations tokenomics.mdx (Vietnamese) --- website/src/pages/vi/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/vi/resources/tokenomics.mdx b/website/src/pages/vi/resources/tokenomics.mdx index 4b1d2516879a..b7e29f27647b 100644 --- a/website/src/pages/vi/resources/tokenomics.mdx +++ b/website/src/pages/vi/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network is incentivized by powerful tokenomics. Here’s ## Tổng quan -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Specifics @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. Curators - Find the best subgraphs for Indexers +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. Indexers - Backbone of blockchain data @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. 
+Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### Creating a subgraph +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### Querying an existing subgraph +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. 
They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. 
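As a rough illustration of the 3% issuance target, the sketch below compounds new issuance on the initial 10 billion GRT supply. It deliberately ignores the burning mechanisms covered next, and the constant and function names are made up for this example.

```typescript
// Minimal projection of gross token supply under the 3% annual issuance target,
// ignoring burning entirely. Names and framing are illustrative only.

const INITIAL_SUPPLY_GRT = 10_000_000_000;
const ANNUAL_ISSUANCE_RATE = 0.03;

function grossSupplyAfterYears(years: number): number {
  // Compound 3% new issuance per year on the initial 10B GRT supply.
  return INITIAL_SUPPLY_GRT * Math.pow(1 + ANNUAL_ISSUANCE_RATE, years);
}

console.log(grossSupplyAfterYears(1)); // 10.3B GRT
console.log(grossSupplyAfterYears(5)); // ~11.59B GRT
```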
-The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. ![Total burned GRT](/img/total-burned-grt.jpeg) From b28807e77169766b84781ae8add2f5c6aa02441f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:53 -0500 Subject: [PATCH 0535/1789] New translations tokenomics.mdx (Marathi) --- website/src/pages/mr/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/mr/resources/tokenomics.mdx b/website/src/pages/mr/resources/tokenomics.mdx index 0fe45e9d9969..168cbea5509b 100644 --- a/website/src/pages/mr/resources/tokenomics.mdx +++ b/website/src/pages/mr/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network is incentivized by powerful tokenomics. Here’s ## सविश्लेषण -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## Specifics @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. क्युरेटर - इंडेक्सर्ससाठी सर्वोत्तम सबग्राफ शोधा +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. इंडेक्सर्स - ब्लॉकचेन डेटाचा कणा @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. 
Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### सबग्राफ तयार करणे +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### विद्यमान सबग्राफची चौकशी करत आहे +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. 
+Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. @@ -72,27 +72,27 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th ## Indexers (Earn GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. 
If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. ![Total burned GRT](/img/total-burned-grt.jpeg) From 021f582c8e80e7ef54b2922b1e831a8c5286833f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:54 -0500 Subject: [PATCH 0536/1789] New translations tokenomics.mdx (Hindi) --- website/src/pages/hi/resources/tokenomics.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/hi/resources/tokenomics.mdx b/website/src/pages/hi/resources/tokenomics.mdx index e3437e3a0fff..2336182be690 100644 --- a/website/src/pages/hi/resources/tokenomics.mdx +++ b/website/src/pages/hi/resources/tokenomics.mdx @@ -6,7 +6,7 @@ description: The Graph Network को शक्तिशाली टोकन ## अवलोकन -The Graph एक विकेन्द्रीकृत प्रोटोकॉल है जो ब्लॉकचेन डेटा तक आसान पहुंच सक्षम करता है। यह ब्लॉकचेन डेटा को उसी तरह से अनुक्रमित करता है जैसे Google वेब को अनुक्रमित करता है। यदि आपने किसी dapp का उपयोग किया है जो किसी Subgraph से डेटा पुनर्प्राप्त करता है, तो संभवतः आपने The Graph के साथ इंटरैक्ट किया है। आज, वेब3 इकोसिस्टम में हजारों [popular dapps](https://thegraph.com/explorer) The Graph का उपयोग कर रहे हैं। +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. ## विशिष्टताएँ @@ -24,9 +24,9 @@ There are four primary network participants: 1. Delegators - Delegate GRT to Indexers & secure the network -2. 
Curators - Find the best subgraphs for Indexers +2. Curators - Find the best Subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query Subgraphs 4. इंडेक्सर्स - ब्लॉकचेन डेटा की रीढ़ @@ -36,7 +36,7 @@ Fishermen and Arbitrators are also integral to the network's success through oth ## Delegators (Passively earn GRT) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. @@ -46,25 +46,25 @@ If you're reading this, you're capable of becoming a Delegator right now by head ## Curators (Earn GRT) -Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. -Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. ## Developers -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. -### सबग्राफ बनाना +### Creating a Subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. 
+Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. -Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. -### किसी मौजूदा सबग्राफ को क्वेरी करना +### Querying an existing Subgraph -Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. सबग्राफ़ को GraphQL का उपयोग करके क्वेरी किया जाता है()/subgraphs/querying/introduction/, और क्वेरी शुल्क को Subgraph Studio()https://thegraph.com/studio/ में GRT के साथ भुगतान किया जाता है। क्वेरी शुल्क को नेटवर्क प्रतिभागियों में उनके प्रोटोकॉल में योगदान के आधार पर वितरित किया जाता है। @@ -72,27 +72,27 @@ Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-sub ## Indexers (Earn GRT) -Indexers The Graph की रीढ़ हैं। वे स्वतंत्र हार्डवेयर और सॉफ़्टवेयर संचालित करते हैं जो The Graph के विकेन्द्रीकृत नेटवर्क को शक्ति प्रदान करता है। Indexers, सबग्राफ से निर्देशों के आधार पर उपभोक्ताओं को डेटा प्रदान करते हैं। +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. Indexers दो तरीकों से GRT रिवार्ड्स कमा सकते हैं: -1. **क्वेरी शुल्क:** डेवलपर्स या उपयोगकर्ताओं द्वारा Subgraph डेटा क्वेरी के लिए भुगतान किया गया GRT। क्वेरी शुल्क सीधे Indexers को एक्सपोनेंशियल रिबेट फ़ंक्शन के अनुसार वितरित किया जाता है (देखें GIP [यहाँ](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162))। +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing रिवार्ड्स**: 3% की वार्षिक जारी राशि Indexers को उनके द्वारा indexed किए गए सबग्राफकी संख्या के आधार पर वितरित की जाती है। ये पुरस्कार Indexers को सबग्राफको index करने के लिए प्रेरित करते हैं, कभी-कभी query fees शुरू होने से पहले भी, ताकि वे Proofs of Indexing (POIs) को एकत्रित और प्रस्तुत कर सकें, यह सत्यापित करने के लिए कि उन्होंने डेटा को सटीक रूप से index किया है। +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. 
+Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. ## Token Supply: Burning & Issuance -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. 
![Total burned GRT](/img/total-burned-grt.jpeg) From b3db71de729b7e9dbc8c419066eba2489189757e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:55 -0500 Subject: [PATCH 0537/1789] New translations tokenomics.mdx (Swahili) --- website/src/pages/sw/resources/tokenomics.mdx | 103 ++++++++++++++++++ 1 file changed, 103 insertions(+) create mode 100644 website/src/pages/sw/resources/tokenomics.mdx diff --git a/website/src/pages/sw/resources/tokenomics.mdx b/website/src/pages/sw/resources/tokenomics.mdx new file mode 100644 index 000000000000..dac3383a28e7 --- /dev/null +++ b/website/src/pages/sw/resources/tokenomics.mdx @@ -0,0 +1,103 @@ +--- +title: Tokenomics of The Graph Network +sidebarTitle: Tokenomics +description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. +--- + +## Overview + +The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a Subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. + +## Specifics + +The Graph's model is akin to a B2B2C model, but it's driven by a decentralized network where participants collaborate to provide data to end users in exchange for GRT rewards. GRT is the utility token for The Graph. It coordinates and incentivizes the interaction between data providers and consumers within the network. + +The Graph plays a vital role in making blockchain data more accessible and supports a marketplace for its exchange. To learn more about The Graph's pay-for-what-you-need model, check out its [free and growth plans](/subgraphs/billing/). + +- GRT Token Address on Mainnet: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) + +- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) + +## The Roles of Network Participants + +There are four primary network participants: + +1. Delegators - Delegate GRT to Indexers & secure the network + +2. Curators - Find the best Subgraphs for Indexers + +3. Developers - Build & query Subgraphs + +4. Indexers - Backbone of blockchain data + +Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). + +![Tokenomics diagram](/img/updated-tokenomics-image.png) + +## Delegators (Passively earn GRT) + +Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in Subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. + +For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. + +There is a 0.5% delegation tax which is burned whenever a Delegator delegates GRT on the network. 
If a Delegator chooses to withdraw their delegated GRT, the Delegator must wait for the 28-epoch unbonding period. Each epoch is 6,646 blocks, which means 28 epochs ends up being approximately 26 days. + +If you're reading this, you're capable of becoming a Delegator right now by heading to the [network participants page](https://thegraph.com/explorer/participants/indexers), and delegating GRT to an Indexer of your choice. + +## Curators (Earn GRT) + +Curators identify high-quality Subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the Subgraph. While any independent network participant can be a Curator, typically Subgraph developers are among the first Curators for their own Subgraphs because they want to ensure their Subgraph is indexed. + +Subgraph developers are encouraged to curate their Subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation. + +Curators pay a 1% curation tax when they curate a new Subgraph. This curation tax is burned, decreasing the supply of GRT. + +## Developers + +Developers build and query Subgraphs to retrieve blockchain data. Since Subgraphs are open source, developers can query existing Subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. + +### Creating a Subgraph + +Developers can [create a Subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. + +Once developers have built and tested their Subgraph, they can [publish their Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network. + +### Querying an existing Subgraph + +Once a Subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the Subgraph. + +Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. + +1% of the query fees paid to the network are burned. + +## Indexers (Earn GRT) + +Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from Subgraphs. + +Indexers can earn GRT rewards in two ways: + +1. **Query fees**: GRT paid by developers or users for Subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). + +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of Subgraphs they are indexing. These rewards incentivize Indexers to index Subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. + +Each Subgraph is allotted a portion of the total network token issuance, based on the amount of the Subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the Subgraph. 
+ +In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network. Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve. + +Indexers can increase their GRT allocations on Subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network. + +The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. + +## Token Supply: Burning & Issuance + +The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on Subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. + +The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a Subgraph, and a 1% of query fees for blockchain data. + +![Total burned GRT](/img/total-burned-grt.jpeg) + +In addition to these regularly occurring burning activities, the GRT token also has a slashing mechanism in place to penalize malicious or irresponsible behavior by Indexers. If an Indexer is slashed, 50% of their indexing rewards for the epoch are burned (while the other half goes to the fisherman), and their self-stake is slashed by 2.5%, with half of this amount being burned. This helps to ensure that Indexers have a strong incentive to act in the best interests of the network and to contribute to its security and stability. + +## Improving the Protocol + +The Graph Network is ever-evolving and improvements to the economic design of the protocol are constantly being made to provide the best experience for all network participants. The Graph Council oversees protocol changes and community members are encouraged to participate. Get involved with protocol improvements in [The Graph Forum](https://forum.thegraph.com/). From ba909a8505563564c094683aec0faae18fb13dd4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:56 -0500 Subject: [PATCH 0538/1789] New translations billing.mdx (Romanian) --- website/src/pages/ro/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ro/subgraphs/billing.mdx b/website/src/pages/ro/subgraphs/billing.mdx index c9f380bb022c..e3a834f86844 100644 --- a/website/src/pages/ro/subgraphs/billing.mdx +++ b/website/src/pages/ro/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: Billing ## Querying Plans -There are two plans to use when querying subgraphs on The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. 
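The tokenomics pages updated in the patches above quote several concrete figures: the 15,000 GRT delegation example at roughly 10%, the 0.5% delegation tax, the suggested 3,000 GRT curation signal with its 1% tax, the 16x delegation ceiling on a 100,000 GRT minimum self-stake, the 28-epoch unbonding window of 6,646-block epochs, and roughly 3% issuance against roughly 1% burn. A short worked check of that arithmetic follows; the ~12-second Ethereum block time used for the epoch-to-days conversion is an outside assumption, not a figure stated in these files.

```latex
% Worked check of the figures quoted in the tokenomics pages above.
% The 12 s block time in the unbonding line is an assumption, not taken from the patches.
\begin{align*}
\text{annual delegation rewards} &\approx 15\,000 \times 0.10 = 1\,500\ \text{GRT} \\
\text{delegation tax burned} &= 15\,000 \times 0.005 = 75\ \text{GRT} \\
\text{curation tax on a 3\,000 GRT signal} &= 3\,000 \times 0.01 = 30\ \text{GRT} \\
\text{delegation ceiling at 100\,000 GRT self-stake} &= 16 \times 100\,000 = 1\,600\,000\ \text{GRT} \\
\text{unbonding period} &= 28 \times 6\,646\ \text{blocks} \times 12\ \text{s} \approx 2.23\times 10^{6}\ \text{s} \approx 25.8\ \text{days} \\
\text{net supply drift} &\approx 3\%\ \text{issuance} - 1\%\ \text{burn} = 2\%\ \text{per year}
\end{align*}
```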
From 00f743ad7616517f2ca13faa9af0b951f6e7e722 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:57 -0500 Subject: [PATCH 0539/1789] New translations billing.mdx (French) --- website/src/pages/fr/subgraphs/billing.mdx | 60 +++++++++++----------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/website/src/pages/fr/subgraphs/billing.mdx b/website/src/pages/fr/subgraphs/billing.mdx index ba4239f2ea01..0d1275c905b5 100644 --- a/website/src/pages/fr/subgraphs/billing.mdx +++ b/website/src/pages/fr/subgraphs/billing.mdx @@ -2,20 +2,20 @@ title: Facturation --- -## Querying Plans +## Plans de requêtes -Il y a deux plans à utiliser lorsqu'on interroge les subgraphs sur le réseau de The Graph. +There are two plans to use when querying Subgraphs on The Graph Network. -- **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. +- **Plan Gratuit (Free Plan)** : Le plan gratuit comprend 100 000 requêtes mensuelles gratuites et un accès complet à l'environnement de test Subgraph Studio. Ce plan est conçu pour les amateurs, les hackathoniens et ceux qui ont des projets parallèles pour essayer The Graph avant de faire évoluer leur dapp. -- **Growth Plan**: The Growth Plan includes everything in the Free Plan with all queries after 100,000 monthly queries requiring payments with GRT or credit card. The Growth Plan is flexible enough to cover teams that have established dapps across a variety of use cases. +- **Plan Croissance (Growth Plan)** : Le plan de croissance comprend tout ce qui est inclus dans le plan gratuit avec toutes les requêtes après 100 000 requêtes mensuelles nécessitant des paiements avec des GRT ou par carte de crédit. Le plan de croissance est suffisamment flexible pour couvrir les équipes qui ont établi des dapps à travers une variété de cas d'utilisation. ## Paiements de Requêtes avec Carte de Crédit⁠ - Pour mettre en place la facturation par carte de crédit/débit, les utilisateurs doivent accéder à Subgraph Studio (https://thegraph.com/studio/) - 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). + 1. Accédez à la [page de Facturation de Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). 2. Cliquez sur le bouton "Connecter le portefeuille" dans le coin supérieur droit de la page. Vous serez redirigé vers la page de sélection des portefeuilles. Sélectionnez votre portefeuille et cliquez sur "Connecter". 3. Choisissez « Mettre à niveau votre abonnement » si vous effectuez une mise à niveau depuis le plan gratuit, ou choisissez « Gérer l'abonnement » si vous avez déjà ajouté des GRT à votre solde de facturation par le passé. Ensuite, vous pouvez estimer le nombre de requêtes pour obtenir une estimation du prix, mais ce n'est pas une étape obligatoire. 4. Pour choisir un paiement par carte de crédit, choisissez “Credit card” comme mode de paiement et remplissez les informations de votre carte de crédit. Ceux qui ont déjà utilisé Stripe peuvent utiliser la fonctionnalité Link pour remplir automatiquement leurs informations. @@ -45,17 +45,17 @@ L'utilisation du GRT sur Arbitrum est nécessaire pour le paiement des requêtes - Alternativement, vous pouvez acquérir du GRT directement sur Arbitrum via un échange décentralisé. 
-> This section is written assuming you already have GRT in your wallet, and you're on Arbitrum. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). +> Cette section est écrite en supposant que vous avez déjà des GRT dans votre portefeuille, et que vous êtes sur Arbitrum. Si vous n'avez pas de GRT, vous pouvez apprendre à en obtenir [ici](#getting-grt). Une fois que vous avez transféré du GRT, vous pouvez l'ajouter à votre solde de facturation. ### Ajout de GRT à l'aide d'un portefeuille -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). +1. Accédez à la [page de Facturation de Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). 2. Cliquez sur le bouton "Connecter le portefeuille" dans le coin supérieur droit de la page. Vous serez redirigé vers la page de sélection des portefeuilles. Sélectionnez votre portefeuille et cliquez sur "Connecter". 3. Cliquez sur le bouton « Manage » situé dans le coin supérieur droit. Les nouveaux utilisateurs verront l'option « Upgrade to Growth plan » (Passer au plan de croissance), tandis que les utilisateurs existants devront sélectionner « Deposit from wallet » (Déposer depuis le portefeuille). 4. Utilisez le curseur pour estimer le nombre de requêtes que vous prévoyez d’effectuer sur une base mensuelle. - - For suggestions on the number of queries you may use, see our **Frequently Asked Questions** page. + - Pour des suggestions sur le nombre de requêtes que vous pouvez utiliser, consultez notre page **Frequently Asked Questions** (Questions fréquemment posées). 5. Choisissez "Cryptocurrency". Le GRT est actuellement la seule cryptomonnaie acceptée sur le réseau The Graph. 6. Sélectionnez le nombre de mois pour lesquels vous souhaitez effectuer un paiement anticipé. - Le paiement anticipé ne vous engage pas sur une utilisation future. Vous ne serez facturé que pour ce que vous utiliserez et vous pourrez retirer votre solde à tout moment. @@ -68,7 +68,7 @@ Une fois que vous avez transféré du GRT, vous pouvez l'ajouter à votre solde ### Retirer des GRT en utilisant un portefeuille -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). +1. Accédez à la [page de Facturation de Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). 2. Cliquez sur le bouton "Connect Wallet" dans le coin supérieur droit de la page. Sélectionnez votre portefeuille et cliquez sur "Connect". 3. Cliquez sur le bouton « Gérer » dans le coin supérieur droit de la page. Sélectionnez « Retirer des GRT ». Un panneau latéral apparaîtra. 4. Entrez le montant de GRT que vous voudriez retirer. @@ -77,11 +77,11 @@ Une fois que vous avez transféré du GRT, vous pouvez l'ajouter à votre solde ### Ajout de GRT à l'aide d'un portefeuille multisig -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). -2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. +1. Accédez à la [page de Facturation de Subgraph Studio] (https://thegraph.com/studio/subgraphs/billing/). +2. Cliquez sur le bouton "Connecter votre Portefeuille" dans le coin supérieur droit de la page. Sélectionnez votre portefeuille et cliquez sur "Connecter". 
Si vous utilisez [Gnosis-Safe](https://gnosis-safe.io/), vous pourrez connecter votre portefeuille multisig ainsi que votre portefeuille de signature. Ensuite, signez le message associé. Cela ne coûtera pas de gaz. 3. Cliquez sur le bouton « Manage » situé dans le coin supérieur droit. Les nouveaux utilisateurs verront l'option « Upgrade to Growth plan » (Passer au plan de croissance), tandis que les utilisateurs existants devront sélectionner « Deposit from wallet » (Déposer depuis le portefeuille). 4. Utilisez le curseur pour estimer le nombre de requêtes que vous prévoyez d’effectuer sur une base mensuelle. - - For suggestions on the number of queries you may use, see our **Frequently Asked Questions** page. + - Pour des suggestions sur le nombre de requêtes que vous pouvez utiliser, consultez notre page **Frequently Asked Questions** (Questions fréquemment posées). 5. Choisissez "Cryptocurrency". Le GRT est actuellement la seule cryptomonnaie acceptée sur le réseau The Graph. 6. Sélectionnez le nombre de mois pour lesquels vous souhaitez effectuer un paiement anticipé. - Le paiement anticipé ne vous engage pas sur une utilisation future. Vous ne serez facturé que pour ce que vous utiliserez et vous pourrez retirer votre solde à tout moment. @@ -99,7 +99,7 @@ Cette section vous montrera comment obtenir du GRT pour payer les frais de requ Voici un guide étape par étape pour acheter de GRT sur Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +1. Allez sur [Coinbase](https://www.coinbase.com/) et créez un compte. 2. Dès que vous aurez créé un compte, vous devrez vérifier votre identité par le biais d'un processus connu sous le nom de KYC (Know Your Customer ou Connaître Votre Client). Il s'agit d'une procédure standard pour toutes les plateformes d'échange de crypto-monnaies centralisées ou dépositaires. 3. Une fois votre identité vérifiée, vous pouvez acheter des GRT. Pour ce faire, cliquez sur le bouton « Acheter/Vendre » en haut à droite de la page. 4. Sélectionnez la devise que vous souhaitez acheter. Sélectionnez GRT. @@ -107,19 +107,19 @@ Voici un guide étape par étape pour acheter de GRT sur Coinbase. 6. Sélectionnez la quantité de GRT que vous souhaitez acheter. 7. Vérifiez votre achat. Vérifiez votre achat et cliquez sur "Buy GRT". 8. Confirmez votre achat. Confirmez votre achat et vous aurez acheté des GRT avec succès. -9. You can transfer the GRT from your account to your wallet such as [MetaMask](https://metamask.io/). +9. Vous pouvez transférer les GRT de votre compte à votre portefeuille tel que [MetaMask](https://metamask.io/). - Pour transférer les GRT dans votre portefeuille, cliquez sur le bouton "Accounts" en haut à droite de la page. - Cliquez sur le bouton "Send" à côté du compte GRT. - Entrez le montant de GRT que vous souhaitez envoyer et l'adresse du portefeuille vers laquelle vous souhaitez l'envoyer. - Cliquez sur "Continue" et confirmez votre transaction. -Veuillez noter que pour des montants d'achat plus importants, Coinbase peut vous demander d'attendre 7 à 10 jours avant de transférer le montant total vers un portefeuille. -You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). +Vous pouvez en savoir plus sur l'acquisition de GRT sur Coinbase [ici](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). 
### Binance Ceci est un guide étape par étape pour l'achat des GRT sur Binance. -1. Go to [Binance](https://www.binance.com/en) and create an account. +1. Allez sur [Binance](https://www.binance.com/en) et créez un compte. 2. Dès que vous aurez créé un compte, vous devrez vérifier votre identité par le biais d'un processus connu sous le nom de KYC (Know Your Customer ou Connaître Votre Client). Il s'agit d'une procédure standard pour toutes les plateformes d'échange de crypto-monnaies centralisées ou dépositaires. 3. Une fois votre identité vérifiée, vous pouvez acheter des GRT. Pour ce faire, cliquez sur le bouton « Acheter maintenant » sur la bannière de la page d'accueil. 4. Vous accéderez à une page où vous pourrez sélectionner la devise que vous souhaitez acheter. Sélectionnez GRT. @@ -127,27 +127,27 @@ Ceci est un guide étape par étape pour l'achat des GRT sur Binance. 6. Sélectionnez la quantité de GRT que vous souhaitez acheter. 7. Confirmez votre achat et cliquez sur « Acheter des GRT ». 8. Confirmez votre achat et vous pourrez voir vos GRT dans votre portefeuille Binance Spot. -9. You can withdraw the GRT from your account to your wallet such as [MetaMask](https://metamask.io/). - - [To withdraw](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) the GRT to your wallet, add your wallet's address to the withdrawal whitelist. +9. Vous pouvez retirer les GRT de votre compte vers votre portefeuille tel que [MetaMask](https://metamask.io/). + - [Pour retirer](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) les GRT dans votre portefeuille, ajoutez l'adresse de votre portefeuille à la liste blanche des retraits. - Cliquez sur le bouton « portefeuille », cliquez sur retrait et sélectionnez GRT. - Saisissez le montant de GRT que vous souhaitez envoyer et l'adresse du portefeuille sur liste blanche à laquelle vous souhaitez l'envoyer. - Cliquer sur « Continuer » et confirmez votre transaction. -You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). +Vous pouvez en savoir plus sur l'achat de GRT sur Binance [ici](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ### Uniswap Voici comment vous pouvez acheter des GRT sur Uniswap. -1. Go to [Uniswap](https://app.uniswap.org/swap?chain=arbitrum) and connect your wallet. +1. Allez sur [Uniswap](https://app.uniswap.org/swap?chain=arbitrum) et connectez votre portefeuille. 2. Sélectionnez le jeton dont vous souhaitez échanger. Sélectionnez ETH. 3. Sélectionnez le jeton vers lequel vous souhaitez échanger. Sélectionnez GRT. - - Make sure you're swapping for the correct token. The GRT smart contract address on Arbitrum One is: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) + - Assurez-vous que vous échangez contre le bon jeton. L'adresse du contrat intelligent GRT sur Arbitrum One est la suivante : [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) 4. Entrez le montant d'ETH que vous souhaitez échanger. 5. Cliquez sur « Échanger ». 6. Confirmez la transaction dans votre portefeuille et attendez qu'elle soit traitée. 
-You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +Vous pouvez en savoir plus sur l'obtention de GRT sur Uniswap [ici](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). ## Obtenir de l'Ether⁠ @@ -157,7 +157,7 @@ Cette section vous montrera comment obtenir de l'Ether (ETH) pour payer les frai Ce sera un guide étape par étape pour acheter de l'ETH sur Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +1. Allez sur [Coinbase](https://www.coinbase.com/) et créez un compte. 2. Une fois que vous avez créé un compte, vérifiez votre identité via un processus appelé KYC (ou Know Your Customer). l s'agit d'une procédure standard pour toutes les plateformes d'échange de crypto-monnaies centralisées ou dépositaires. 3. Une fois que vous avez vérifié votre identité, achetez de l'ETH en cliquant sur le bouton « Acheter/Vendre » en haut à droite de la page. 4. Choisissez la devise que vous souhaitez acheter. Sélectionnez ETH. @@ -165,20 +165,20 @@ Ce sera un guide étape par étape pour acheter de l'ETH sur Coinbase. 6. Entrez le montant d'ETH que vous souhaitez acheter. 7. Vérifiez votre achat et cliquez sur « Acheter des Ethereum ». 8. Confirmez votre achat et vous aurez acheté avec succès de l'ETH. -9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). +9. Vous pouvez transférer les ETH de votre compte Coinbase vers votre portefeuille tel que [MetaMask](https://metamask.io/). - Pour transférer l'ETH vers votre portefeuille, cliquez sur le bouton « Comptes » en haut à droite de la page. - Cliquez sur le bouton « Envoyer » à côté du compte ETH. - Entrez le montant d'ETH que vous souhaitez envoyer et l'adresse du portefeuille vers lequel vous souhaitez l'envoyer. - Assurez-vous que vous envoyez à votre adresse de portefeuille Ethereum sur Arbitrum One. - Cliquer sur « Continuer » et confirmez votre transaction. -You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). +Vous pouvez en savoir plus sur l'obtention d'ETH sur Coinbase [ici](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance Ce sera un guide étape par étape pour acheter des ETH sur Binance. -1. Go to [Binance](https://www.binance.com/en) and create an account. +1. Allez sur [Binance](https://www.binance.com/en) et créez un compte. 2. Une fois que vous avez créé un compte, vérifiez votre identité via un processus appelé KYC (ou Know Your Customer). l s'agit d'une procédure standard pour toutes les plateformes d'échange de crypto-monnaies centralisées ou dépositaires. 3. Une fois que vous avez vérifié votre identité, achetez des ETH en cliquant sur le bouton « Acheter maintenant » sur la bannière de la page d'accueil. 4. Choisissez la devise que vous souhaitez acheter. Sélectionnez ETH. @@ -186,14 +186,14 @@ Ce sera un guide étape par étape pour acheter des ETH sur Binance. 6. Entrez le montant d'ETH que vous souhaitez acheter. 7. Vérifiez votre achat et cliquez sur « Acheter des Ethereum ». 8. Confirmez votre achat et vous verrez votre ETH dans votre portefeuille Binance Spot. -9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). +9. 
Vous pouvez retirer les ETH de votre compte vers votre portefeuille tel que [MetaMask](https://metamask.io/). - Pour retirer l'ETH vers votre portefeuille, ajoutez l'adresse de votre portefeuille à la liste blanche de retrait. - Cliquez sur le bouton « portefeuille », cliquez sur retirer et sélectionnez ETH. - Entrez le montant d'ETH que vous souhaitez envoyer et l'adresse du portefeuille sur liste blanche à laquelle vous souhaitez l'envoyer. - Assurez-vous que vous envoyez à votre adresse de portefeuille Ethereum sur Arbitrum One. - Cliquer sur « Continuer » et confirmez votre transaction. -You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). +Vous pouvez en savoir plus sur l'achat d'ETH sur Binance [ici](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ## FAQ sur la facturation @@ -203,11 +203,11 @@ Vous n'avez pas besoin de savoir à l'avance combien de requêtes vous aurez bes Nous vous recommandons de surestimer le nombre de requêtes dont vous aurez besoin afin de ne pas avoir à recharger votre solde fréquemment. Pour les applications de petite et moyenne taille, une bonne estimation consiste à commencer par 1 à 2 millions de requêtes par mois et à surveiller de près l'utilisation au cours des premières semaines. Pour les applications plus grandes, une bonne estimation consiste à utiliser le nombre de visites quotidiennes que reçoit votre site multiplié par le nombre de requêtes que votre page la plus active effectue à son ouverture. -Of course, both new and existing users can reach out to Edge & Node's BD team for a consult to learn more about anticipated usage. +Bien entendu, les nouveaux utilisateurs et les utilisateurs existants peuvent contacter l'équipe BD d'Edge & Node pour une consultation afin d'en savoir plus sur l'utilisation prévue. ### Puis-je retirer du GRT de mon solde de facturation ? -Yes, you can always withdraw GRT that has not already been used for queries from your billing balance. The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). +Oui, vous pouvez toujours retirer de votre solde de facturation les GRT qui n'ont pas encore été utilisés pour des requêtes. Le contrat de facturation est uniquement conçu pour faire le bridge entre les GRT du réseau principal Ethereum et le réseau Arbitrum. Si vous souhaitez transférer vos GRT d'Arbitrum vers le réseau principal Ethereum, vous devrez utiliser le [Bridge Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161). ### Que se passe-t-il lorsque mon solde de facturation est épuisé ? Vais-je recevoir un avertissement ? 
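The billing pages translated above describe paying for Subgraph queries with an API key, but none of the patches show what such a query looks like in practice. The sketch below is illustrative only: the gateway URL shape, the subgraph ID, and the `_meta` field are assumptions about a typical setup, not values taken from these files.

```typescript
// Minimal sketch of a paid Subgraph query, assuming a gateway-style endpoint
// that embeds the API key in the URL. The URL shape and SUBGRAPH_ID below are
// placeholders (assumptions), not values taken from the patches above.
const API_KEY = process.env.GRAPH_API_KEY ?? "<api-key>";
const SUBGRAPH_ID = "<subgraph-id>"; // hypothetical published Subgraph ID

async function queryLatestIndexedBlock(): Promise<number> {
  const url = `https://gateway.thegraph.com/api/${API_KEY}/subgraphs/id/${SUBGRAPH_ID}`;
  const res = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    // `_meta` is a field graph-node commonly exposes; treat it as an assumption here.
    body: JSON.stringify({ query: "{ _meta { block { number } } }" }),
  });
  const { data, errors } = await res.json();
  if (errors) throw new Error(`GraphQL errors: ${JSON.stringify(errors)}`);
  return data._meta.block.number;
}

queryLatestIndexedBlock()
  .then((block) => console.log("Latest indexed block:", block))
  .catch(console.error);
```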
From a4ee347373b70a257afe6830472359de5a90abea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:58 -0500 Subject: [PATCH 0540/1789] New translations billing.mdx (Spanish) --- website/src/pages/es/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/es/subgraphs/billing.mdx b/website/src/pages/es/subgraphs/billing.mdx index b2210285e434..dc6d65bd61f2 100644 --- a/website/src/pages/es/subgraphs/billing.mdx +++ b/website/src/pages/es/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: Facturación ## Planes de consultas -Existen dos planes para usar al consultar subgrafos en The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Plan Gratuito**: El Plan Gratuito incluye 100.000 consultas mensuales gratuitas con acceso completo al entorno de pruebas de Subgraph Studio. Este plan está diseñado para aficionados, participantes de hackatones y aquellos con proyectos paralelos que deseen probar The Graph antes de escalar su dapp. From b3ebcf1c3176af18db31f7e40d9219cf57d10c13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:17:59 -0500 Subject: [PATCH 0541/1789] New translations billing.mdx (Arabic) --- website/src/pages/ar/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ar/subgraphs/billing.mdx b/website/src/pages/ar/subgraphs/billing.mdx index e5b5deb5c4ef..f63d86a370d6 100644 --- a/website/src/pages/ar/subgraphs/billing.mdx +++ b/website/src/pages/ar/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: الفوترة ## Querying Plans -There are two plans to use when querying subgraphs on The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. From 66f7195473a17df781a3243652542a7ba6aef0b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:00 -0500 Subject: [PATCH 0542/1789] New translations billing.mdx (Czech) --- website/src/pages/cs/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/cs/subgraphs/billing.mdx b/website/src/pages/cs/subgraphs/billing.mdx index 4118bf1d451a..c1a2e3a8c1b2 100644 --- a/website/src/pages/cs/subgraphs/billing.mdx +++ b/website/src/pages/cs/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: Fakturace ## Querying Plans -There are two plans to use when querying subgraphs on The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. 
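Each of the billing patches above restates the same quota: 100,000 free monthly queries, with everything beyond that billed under the Growth Plan in GRT or by credit card. A small helper like the following makes that cutoff explicit when estimating usage; the per-query price is deliberately a parameter and the example value is hypothetical, since none of these files state a price.

```typescript
// Sketch of the Free Plan cutoff described in the billing pages above.
// FREE_MONTHLY_QUERIES comes from those pages; pricePerQuery is a placeholder
// argument because no per-query price appears in the patches.
const FREE_MONTHLY_QUERIES = 100_000;

function billableQueries(monthlyQueries: number): number {
  return Math.max(0, monthlyQueries - FREE_MONTHLY_QUERIES);
}

function estimatedMonthlyCost(monthlyQueries: number, pricePerQuery: number): number {
  return billableQueries(monthlyQueries) * pricePerQuery;
}

// Example: 1.5M queries per month with a hypothetical price of 0.00004 USD per query.
console.log(billableQueries(1_500_000));            // 1400000
console.log(estimatedMonthlyCost(1_500_000, 4e-5)); // 56 (hypothetical USD)
```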
From 0a0889256a34c95d41c74ac36a6749794a1d9246 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:02 -0500 Subject: [PATCH 0543/1789] New translations billing.mdx (German) --- website/src/pages/de/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/de/subgraphs/billing.mdx b/website/src/pages/de/subgraphs/billing.mdx index 7014ebf64d61..94e8831addd5 100644 --- a/website/src/pages/de/subgraphs/billing.mdx +++ b/website/src/pages/de/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: Billing ## Querying Plans -Es gibt zwei Pläne für die Abfrage von Subgraphen in The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. From 8c06f56975cb680a790df65c350dab286f1bbc12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:03 -0500 Subject: [PATCH 0544/1789] New translations billing.mdx (Italian) --- website/src/pages/it/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/it/subgraphs/billing.mdx b/website/src/pages/it/subgraphs/billing.mdx index c9f380bb022c..e3a834f86844 100644 --- a/website/src/pages/it/subgraphs/billing.mdx +++ b/website/src/pages/it/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: Billing ## Querying Plans -There are two plans to use when querying subgraphs on The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. From f83d3d9741ede7fae3a4a4bccf7b73f7a08daee2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:04 -0500 Subject: [PATCH 0545/1789] New translations billing.mdx (Japanese) --- website/src/pages/ja/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ja/subgraphs/billing.mdx b/website/src/pages/ja/subgraphs/billing.mdx index 9967aa377644..051c3f98c6de 100644 --- a/website/src/pages/ja/subgraphs/billing.mdx +++ b/website/src/pages/ja/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: 請求書 ## Querying Plans -There are two plans to use when querying subgraphs on The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. 
From d0deafd8ee927650826f39ce8fd4b35f314c6332 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:05 -0500 Subject: [PATCH 0546/1789] New translations billing.mdx (Korean) --- website/src/pages/ko/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ko/subgraphs/billing.mdx b/website/src/pages/ko/subgraphs/billing.mdx index c9f380bb022c..e3a834f86844 100644 --- a/website/src/pages/ko/subgraphs/billing.mdx +++ b/website/src/pages/ko/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: Billing ## Querying Plans -There are two plans to use when querying subgraphs on The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. From b717b19710b3176f2f01948b26631fa4882d2f76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:06 -0500 Subject: [PATCH 0547/1789] New translations billing.mdx (Dutch) --- website/src/pages/nl/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/nl/subgraphs/billing.mdx b/website/src/pages/nl/subgraphs/billing.mdx index c9f380bb022c..e3a834f86844 100644 --- a/website/src/pages/nl/subgraphs/billing.mdx +++ b/website/src/pages/nl/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: Billing ## Querying Plans -There are two plans to use when querying subgraphs on The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. From e9312f77d8f3a77088c64773b9710a3423d3a287 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:07 -0500 Subject: [PATCH 0548/1789] New translations billing.mdx (Polish) --- website/src/pages/pl/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pl/subgraphs/billing.mdx b/website/src/pages/pl/subgraphs/billing.mdx index 511ac8067271..fb7c8afdfaa7 100644 --- a/website/src/pages/pl/subgraphs/billing.mdx +++ b/website/src/pages/pl/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: Billing ## Querying Plans -There are two plans to use when querying subgraphs on The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. 
From 4ed32a9bec30d734a4b0fa71aaf08184f14653ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:08 -0500 Subject: [PATCH 0549/1789] New translations billing.mdx (Portuguese) --- website/src/pages/pt/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pt/subgraphs/billing.mdx b/website/src/pages/pt/subgraphs/billing.mdx index f73ae48ff725..fc02505179ec 100644 --- a/website/src/pages/pt/subgraphs/billing.mdx +++ b/website/src/pages/pt/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: Cobranças ## Planos de Query -Há dois planos disponíveis para queries de subgraphs na Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Plano Grátis**: Inclui 100.000 queries grátis por mês, com acesso ilimitado ao ambiente de testes do Subgraph Studio. O plano é feito para entusiastas, participantes de hackathons, e para quem tem projetos paralelos para experimentar o The Graph antes de escalar o seu dapp. From a94f3fad3af9fc949fa9a56bf1cd40b59ddd560c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:10 -0500 Subject: [PATCH 0550/1789] New translations billing.mdx (Russian) --- website/src/pages/ru/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ru/subgraphs/billing.mdx b/website/src/pages/ru/subgraphs/billing.mdx index 0a7daa3442d0..904206f171a1 100644 --- a/website/src/pages/ru/subgraphs/billing.mdx +++ b/website/src/pages/ru/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: Выставление счетов ## Querying Plans -Существует два плана для выполнения запросов к субграфам в The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. From 85d9eec6062274e50f0ec57bc085d50492355428 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:11 -0500 Subject: [PATCH 0551/1789] New translations billing.mdx (Swedish) --- website/src/pages/sv/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/sv/subgraphs/billing.mdx b/website/src/pages/sv/subgraphs/billing.mdx index d864c1d3d6fb..caefce0f0ffc 100644 --- a/website/src/pages/sv/subgraphs/billing.mdx +++ b/website/src/pages/sv/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: Fakturering ## Querying Plans -There are two plans to use when querying subgraphs on The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. 
From e2e5528f5af2cdee73073c0c719c5b77e50fb328 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:12 -0500 Subject: [PATCH 0552/1789] New translations billing.mdx (Turkish) --- website/src/pages/tr/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/tr/subgraphs/billing.mdx b/website/src/pages/tr/subgraphs/billing.mdx index a86c1adbb755..0cc540f34713 100644 --- a/website/src/pages/tr/subgraphs/billing.mdx +++ b/website/src/pages/tr/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: Faturalandırma ## Querying Plans -The Graph Ağı'nda subgraph'leri sorgulamak için kullanabileceğiniz iki plan bulunmaktadır. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. From 820f1d6a591366f6065662502da0f771f56b5246 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:13 -0500 Subject: [PATCH 0553/1789] New translations billing.mdx (Ukrainian) --- website/src/pages/uk/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/uk/subgraphs/billing.mdx b/website/src/pages/uk/subgraphs/billing.mdx index ac919c79491b..4604d9196579 100644 --- a/website/src/pages/uk/subgraphs/billing.mdx +++ b/website/src/pages/uk/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: Білінг ## Querying Plans -There are two plans to use when querying subgraphs on The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. From 96c0af9602cc0717f345f8d2a69c14fe9badb91b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:14 -0500 Subject: [PATCH 0554/1789] New translations billing.mdx (Chinese Simplified) --- website/src/pages/zh/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/zh/subgraphs/billing.mdx b/website/src/pages/zh/subgraphs/billing.mdx index 985cc1679f23..4aa8b81eee99 100644 --- a/website/src/pages/zh/subgraphs/billing.mdx +++ b/website/src/pages/zh/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: 计费 ## Querying Plans -There are two plans to use when querying subgraphs on The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. 
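The billing pages in this series repeatedly note that query payments settle in GRT on Arbitrum One, and the French billing and Swahili tokenomics files above quote the Arbitrum One GRT token address (0x9623063377AD1B27544C965cCd7342f7EA7e88C7). As a quick sanity check before depositing, a wallet's GRT balance can be read roughly as sketched below; the RPC endpoint and wallet address are placeholders, and ethers v6 is an assumed tooling choice rather than something these files prescribe.

```typescript
import { Contract, JsonRpcProvider, formatUnits } from "ethers"; // ethers v6 (assumed tooling)

// GRT token address on Arbitrum One, as quoted in the billing/tokenomics pages above.
const GRT_ARBITRUM_ONE = "0x9623063377AD1B27544C965cCd7342f7EA7e88C7";

// Minimal ERC-20 fragment; GRT follows the standard 18-decimal ERC-20 interface.
const ERC20_ABI = ["function balanceOf(address owner) view returns (uint256)"];

async function grtBalance(wallet: string, rpcUrl: string): Promise<string> {
  const provider = new JsonRpcProvider(rpcUrl); // e.g. an Arbitrum One RPC endpoint (placeholder)
  const grt = new Contract(GRT_ARBITRUM_ONE, ERC20_ABI, provider);
  const raw: bigint = await grt.balanceOf(wallet);
  return formatUnits(raw, 18); // GRT uses 18 decimals
}

// Usage (both arguments are placeholders):
grtBalance("0xYourWalletAddress", "https://arbitrum-one.example-rpc.invalid")
  .then((bal) => console.log(`GRT balance on Arbitrum One: ${bal}`))
  .catch(console.error);
```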
From 8a4708ffddcaeb96ba281183f56e9d68948d6d7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:15 -0500 Subject: [PATCH 0555/1789] New translations billing.mdx (Urdu (Pakistan)) --- website/src/pages/ur/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ur/subgraphs/billing.mdx b/website/src/pages/ur/subgraphs/billing.mdx index f7f5c848204d..ab3c5e5213be 100644 --- a/website/src/pages/ur/subgraphs/billing.mdx +++ b/website/src/pages/ur/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: بلنگ ## Querying Plans -There are two plans to use when querying subgraphs on The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. From c5c38a58bfeabc16cfd453e36c27067cd30cb965 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:16 -0500 Subject: [PATCH 0556/1789] New translations billing.mdx (Vietnamese) --- website/src/pages/vi/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/vi/subgraphs/billing.mdx b/website/src/pages/vi/subgraphs/billing.mdx index c9f380bb022c..e3a834f86844 100644 --- a/website/src/pages/vi/subgraphs/billing.mdx +++ b/website/src/pages/vi/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: Billing ## Querying Plans -There are two plans to use when querying subgraphs on The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. From 5757eea21693bb90a2acc6baeb7e2b5d848cfe14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:17 -0500 Subject: [PATCH 0557/1789] New translations billing.mdx (Marathi) --- website/src/pages/mr/subgraphs/billing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/mr/subgraphs/billing.mdx b/website/src/pages/mr/subgraphs/billing.mdx index 7126ce22520f..81492b55a4ef 100644 --- a/website/src/pages/mr/subgraphs/billing.mdx +++ b/website/src/pages/mr/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: Billing ## Querying Plans -There are two plans to use when querying subgraphs on The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. 
From d2d8e04a6125aa9d2bc12df96f103e62a518eef9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:18 -0500 Subject: [PATCH 0558/1789] New translations billing.mdx (Hindi) --- website/src/pages/hi/subgraphs/billing.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/hi/subgraphs/billing.mdx b/website/src/pages/hi/subgraphs/billing.mdx index db7598ed5faf..dc8f1f5ee585 100644 --- a/website/src/pages/hi/subgraphs/billing.mdx +++ b/website/src/pages/hi/subgraphs/billing.mdx @@ -4,7 +4,7 @@ title: बिलिंग ## Querying Plans -There are two plans to use when querying subgraphs on The Graph Network. +There are two plans to use when querying Subgraphs on The Graph Network. - **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. @@ -31,11 +31,11 @@ Subgraph users can use The Graph Token (or GRT) to pay for queries on The Graph ### GRT on Arbitrum or Ethereum -The Graph का बिलिंग सिस्टम Arbitrum पर GRT को स्वीकार करता है, और उपयोगकर्ताओं को गैस के भुगतान के लिए Arbitrum पर ETH की आवश्यकता होगी। जबकि The Graph प्रोटोकॉल Ethereum Mainnet पर शुरू हुआ, सभी गतिविधियाँ, जिसमें बिलिंग कॉन्ट्रैक्ट्स भी शामिल हैं, अब Arbitrum One पर हैं। +The Graph का बिलिंग सिस्टम Arbitrum पर GRT को स्वीकार करता है, और उपयोगकर्ताओं को गैस के भुगतान के लिए Arbitrum पर ETH की आवश्यकता होगी। जबकि The Graph प्रोटोकॉल Ethereum Mainnet पर शुरू हुआ, सभी गतिविधियाँ, जिसमें बिलिंग कॉन्ट्रैक्ट्स भी शामिल हैं, अब Arbitrum One पर हैं। क्वेरियों के लिए भुगतान करने के लिए, आपको Arbitrum पर GRT की आवश्यकता है। इसे प्राप्त करने के लिए कुछ विभिन्न तरीके यहां दिए गए हैं: -- यदि आपके पास पहले से Ethereum पर GRT है, तो आप इसे Arbitrum पर ब्रिज कर सकते हैं। आप यह Subgraph Studio में प्रदान किए गए GRT ब्रिजिंग विकल्प के माध्यम से या निम्नलिखित में से किसी एक ब्रिज का उपयोग करके कर सकते हैं: +- यदि आपके पास पहले से Ethereum पर GRT है, तो आप इसे Arbitrum पर ब्रिज कर सकते हैं। आप यह Subgraph Studio में प्रदान किए गए GRT ब्रिजिंग विकल्प के माध्यम से या निम्नलिखित में से किसी एक ब्रिज का उपयोग करके कर सकते हैं: - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) From e1a442df65b21bb03b0658b95ca2b804b2b2292a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:19 -0500 Subject: [PATCH 0559/1789] New translations billing.mdx (Swahili) --- website/src/pages/sw/subgraphs/billing.mdx | 214 +++++++++++++++++++++ 1 file changed, 214 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/billing.mdx diff --git a/website/src/pages/sw/subgraphs/billing.mdx b/website/src/pages/sw/subgraphs/billing.mdx new file mode 100644 index 000000000000..e3a834f86844 --- /dev/null +++ b/website/src/pages/sw/subgraphs/billing.mdx @@ -0,0 +1,214 @@ +--- +title: Billing +--- + +## Querying Plans + +There are two plans to use when querying Subgraphs on The Graph Network. + +- **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. + +- **Growth Plan**: The Growth Plan includes everything in the Free Plan with all queries after 100,000 monthly queries requiring payments with GRT or credit card. 
The Growth Plan is flexible enough to cover teams that have established dapps across a variety of use cases. + + + +## Query Payments with credit card + +- To set up billing with credit/debit cards, users should access Subgraph Studio (https://thegraph.com/studio/) + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). + 2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. Select your wallet and click on "Connect". + 3. Choose “Upgrade plan” if you are upgrading from the Free Plan or choose “Manage Plan” if you have already added GRT to your billing balance in the past. Next, you can estimate the number of queries to get a pricing estimate, but this is not a required step. + 4. To choose a credit card payment, choose “Credit card” as the payment method and fill out your credit card information. Those who have used Stripe before can use the Link feature to autofill their details. +- Invoices will be processed at the end of each month and require an active credit card on file for all queries beyond the free plan quota. + +## Query Payments with GRT + +Subgraph users can use The Graph Token (or GRT) to pay for queries on The Graph Network. With GRT, invoices will be processed at the end of each month and require a sufficient balance of GRT to make queries beyond the Free Plan quota of 100,000 monthly queries. You'll be required to pay fees generated from your API keys. Using the billing contract, you'll be able to: + +- Add and withdraw GRT from your account balance. +- Keep track of your balances based on how much GRT you have added to your account balance, how much you have removed, and your invoices. +- Automatically pay invoices based on query fees generated, as long as there is enough GRT in your account balance. + +### GRT on Arbitrum or Ethereum + +The Graph’s billing system accepts GRT on Arbitrum, and users will need ETH on Arbitrum to pay their gas. While The Graph protocol started on Ethereum Mainnet, all activity, including the billing contracts, is now on Arbitrum One. + +To pay for queries, you need GRT on Arbitrum. Here are a few different ways to achieve this: + +- If you already have GRT on Ethereum, you can bridge it to Arbitrum. You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges: + +- [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) + +- [TransferTo](https://transferto.xyz/swap) + +- If you already have assets on Arbitrum, you can swap them for GRT via a swapping protocol like Uniswap. + +- Alternatively, you acquire GRT directly on Arbitrum through a decentralized exchange. + +> This section is written assuming you already have GRT in your wallet, and you're on Arbitrum. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). + +Once you bridge GRT, you can add it to your billing balance. + +### Adding GRT using a wallet + +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). +2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. Select your wallet and click on "Connect". +3. Select the "Manage" button near the top right corner. First time users will see an option to "Upgrade to Growth plan" while returning users will click "Deposit from wallet". +4. Use the slider to estimate the number of queries you expect to make on a monthly basis. 
+ - For suggestions on the number of queries you may use, see our **Frequently Asked Questions** page.
+5. Choose "Cryptocurrency". GRT is currently the only cryptocurrency accepted on The Graph Network.
+6. Select the number of months you would like to prepay.
+ - Paying in advance does not commit you to future usage. You will only be charged for what you use and you can withdraw your balance at any time.
+7. Pick the network from which you are depositing your GRT. GRT on Arbitrum or Ethereum are both acceptable.
+8. Click "Allow GRT Access" and then specify the amount of GRT that can be taken from your wallet.
+ - If you are prepaying for multiple months, you must allow access to the amount that corresponds with that amount. This interaction will not cost any gas.
+9. Lastly, click on "Add GRT to Billing Balance". This transaction will require ETH on Arbitrum to cover the gas costs.
+
+- Note that GRT deposited from Arbitrum will process within a few moments while GRT deposited from Ethereum will take approximately 15-20 minutes to process. Once the transaction is confirmed, you'll see the GRT added to your account balance.
+
+### Withdrawing GRT using a wallet
+
+1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/).
+2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect".
+3. Click the "Manage" button at the top right corner of the page. Select "Withdraw GRT". A side panel will appear.
+4. Enter the amount of GRT you would like to withdraw.
+5. Click 'Withdraw GRT' to withdraw the GRT from your account balance. Sign the associated transaction in your wallet. This will cost gas. The GRT will be sent to your Arbitrum wallet.
+6. Once the transaction is confirmed, you'll see the GRT withdrawn from your account balance in your Arbitrum wallet.
+
+### Adding GRT using a multisig wallet
+
+1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/).
+2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas.
+3. Select the "Manage" button near the top right corner. First time users will see an option to "Upgrade to Growth plan" while returning users will click "Deposit from wallet".
+4. Use the slider to estimate the number of queries you expect to make on a monthly basis.
+ - For suggestions on the number of queries you may use, see our **Frequently Asked Questions** page.
+5. Choose "Cryptocurrency". GRT is currently the only cryptocurrency accepted on The Graph Network.
+6. Select the number of months you would like to prepay.
+ - Paying in advance does not commit you to future usage. You will only be charged for what you use and you can withdraw your balance at any time.
+7. Pick the network from which you are depositing your GRT. GRT on Arbitrum or Ethereum are both acceptable.
+8. Click "Allow GRT Access" and then specify the amount of GRT that can be taken from your wallet.
+ - If you are prepaying for multiple months, you must allow access to the amount that corresponds with that amount. This interaction will not cost any gas.
+9. Lastly, click on "Add GRT to Billing Balance". This transaction will require ETH on Arbitrum to cover the gas costs.
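Since the final step needs GRT to deposit plus ETH on Arbitrum One for gas, a quick balance check before signing can save a failed transaction. The sketch below is a minimal example using ethers v6 against a public Arbitrum One RPC endpoint; the wallet address is a placeholder, and the GRT token address is the Arbitrum One address cited in the Uniswap section further down this page.

```typescript
// balance-check.ts - a quick sanity check before depositing (ethers v6, Node 18+).
import { ethers } from "ethers";

// Public Arbitrum One RPC endpoint; replace WALLET with the address you intend to deposit from.
const provider = new ethers.JsonRpcProvider("https://arb1.arbitrum.io/rpc");
const GRT = "0x9623063377AD1B27544C965cCd7342f7EA7e88C7"; // GRT on Arbitrum One
const WALLET = "<YOUR_WALLET_ADDRESS>";

const erc20Abi = ["function balanceOf(address) view returns (uint256)"];

async function main(): Promise<void> {
  const grt = new ethers.Contract(GRT, erc20Abi, provider);
  const grtBalance = await grt.balanceOf(WALLET);
  const ethBalance = await provider.getBalance(WALLET);
  // GRT uses 18 decimals, like ETH.
  console.log(`GRT on Arbitrum One: ${ethers.formatUnits(grtBalance, 18)}`);
  console.log(`ETH for gas: ${ethers.formatEther(ethBalance)}`);
}

main().catch(console.error);
```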
+
+- Note that GRT deposited from Arbitrum will process within a few moments while GRT deposited from Ethereum will take approximately 15-20 minutes to process. Once the transaction is confirmed, you'll see the GRT added to your account balance.
+
+## Getting GRT
+
+This section will show you how to get GRT to pay for query fees.
+
+### Coinbase
+
+This will be a step by step guide for purchasing GRT on Coinbase.
+
+1. Go to [Coinbase](https://www.coinbase.com/) and create an account.
+2. Once you have created an account, you will need to verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges.
+3. Once you have verified your identity, you can purchase GRT. You can do this by clicking on the "Buy/Sell" button on the top right of the page.
+4. Select the currency you want to purchase. Select GRT.
+5. Select your preferred payment method.
+6. Select the amount of GRT you want to purchase.
+7. Review your purchase and click "Buy GRT".
+8. Confirm your purchase and you will have successfully purchased GRT.
+9. You can transfer the GRT from your account to your wallet such as [MetaMask](https://metamask.io/).
+ - To transfer the GRT to your wallet, click on the "Accounts" button on the top right of the page.
+ - Click on the "Send" button next to the GRT account.
+ - Enter the amount of GRT you want to send and the wallet address you want to send it to.
+ - Click "Continue" and confirm your transaction. Please note that for larger purchase amounts, Coinbase may require you to wait 7-10 days before transferring the full amount to a wallet.
+
+You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency).
+
+### Binance
+
+This will be a step by step guide for purchasing GRT on Binance.
+
+1. Go to [Binance](https://www.binance.com/en) and create an account.
+2. Once you have created an account, you will need to verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges.
+3. Once you have verified your identity, you can purchase GRT. You can do this by clicking on the "Buy Now" button on the homepage banner.
+4. You will be taken to a page where you can select the currency you want to purchase. Select GRT.
+5. Select your preferred payment method. You'll be able to pay with different fiat currencies such as Euros, US Dollars, and more.
+6. Select the amount of GRT you want to purchase.
+7. Review your purchase and click "Buy GRT".
+8. Confirm your purchase and you will be able to see your GRT in your Binance Spot Wallet.
+9. You can withdraw the GRT from your account to your wallet such as [MetaMask](https://metamask.io/).
+ - [To withdraw](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) the GRT to your wallet, add your wallet's address to the withdrawal whitelist.
+ - Click on the "wallet" button, click withdraw, and select GRT.
+ - Enter the amount of GRT you want to send and the whitelisted wallet address you want to send it to.
+ - Click "Continue" and confirm your transaction.
+ +You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). + +### Uniswap + +This is how you can purchase GRT on Uniswap. + +1. Go to [Uniswap](https://app.uniswap.org/swap?chain=arbitrum) and connect your wallet. +2. Select the token you want to swap from. Select ETH. +3. Select the token you want to swap to. Select GRT. + - Make sure you're swapping for the correct token. The GRT smart contract address on Arbitrum One is: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +4. Enter the amount of ETH you want to swap. +5. Click "Swap". +6. Confirm the transaction in your wallet and you wait for the transaction to process. + +You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). + +## Getting Ether + +This section will show you how to get Ether (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. 
+ - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). + +## Billing FAQs + +### How many queries will I need? + +You don't need to know how many queries you'll need in advance. You will only be charged for what you use and you can withdraw GRT from your account at any time. + +We recommend you overestimate the number of queries you will need so that you don’t have to top up your balance frequently. A good estimate for small to medium sized applications is to start with 1M-2M queries per month and monitor usage closely in the first weeks. For larger apps, a good estimate is to use the number of daily visits your site gets multiplied by the number of queries your most active page makes upon opening. + +Of course, both new and existing users can reach out to Edge & Node's BD team for a consult to learn more about anticipated usage. + +### Can I withdraw GRT from my billing balance? + +Yes, you can always withdraw GRT that has not already been used for queries from your billing balance. The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). + +### What happens when my billing balance runs out? Will I get a warning? + +You will receive several email notifications before your billing balance runs out. From 014baaa72cbb87cdb15ae1f676b1495f3621f265 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:20 -0500 Subject: [PATCH 0560/1789] New translations arweave.mdx (Romanian) --- .../pages/ro/subgraphs/cookbook/arweave.mdx | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/website/src/pages/ro/subgraphs/cookbook/arweave.mdx b/website/src/pages/ro/subgraphs/cookbook/arweave.mdx index 2372025621d1..18b485a9c382 100644 --- a/website/src/pages/ro/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/ro/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -25,12 +25,12 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are To be able to build and deploy Arweave Subgraphs, you need two packages: -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. 
[Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Subgraph's components -There are three components of a subgraph: +There are three components of a Subgraph: ### 1. Manifest - `subgraph.yaml` @@ -40,22 +40,22 @@ Defines the data sources of interest, and how they should be processed. Arweave Here you define which data you want to be able to query after indexing your Subgraph using GraphQL. This is actually similar to a model for an API, where the model defines the structure of a request body. -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Mappings - `mapping.ts` This is the logic that determines how data should be retrieved and stored when someone interacts with the data sources you are listening to. The data gets translated and is stored based off the schema you have listed. -During subgraph development there are two key commands: +During Subgraph development there are two key commands: ``` $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## Subgraph Manifest Definition -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet @@ -99,7 +99,7 @@ Arweave data sources support two types of handlers: ## Schema Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. 
There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript Mappings @@ -152,7 +152,7 @@ Writing the mappings of an Arweave Subgraph is very similar to writing the mappi ## Deploying an Arweave Subgraph in Subgraph Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## Querying an Arweave Subgraph -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Example Subgraphs -Here is an example subgraph for reference: +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### Can a subgraph index Arweave and other chains? +### Can a Subgraph index Arweave and other chains? -No, a subgraph can only support data sources from one chain/network. +No, a Subgraph can only support data sources from one chain/network. ### Can I index the stored files on Arweave? Currently, The Graph is only indexing Arweave as a blockchain (its blocks and transactions). -### Can I identify Bundlr bundles in my subgraph? +### Can I identify Bundlr bundles in my Subgraph? This is not currently supported. @@ -188,7 +188,7 @@ The source.owner can be the user's public key or account address. ### What is the current encryption format? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). 
The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: From 7d2a06e56776b612b2e815369e0c2c684f5eb20f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:21 -0500 Subject: [PATCH 0561/1789] New translations arweave.mdx (French) --- .../pages/fr/subgraphs/cookbook/arweave.mdx | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/arweave.mdx b/website/src/pages/fr/subgraphs/cookbook/arweave.mdx index 2b11f5ea02a1..0995d59cd834 100644 --- a/website/src/pages/fr/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Construction de subgraphs pour Arweave --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! Dans ce guide, vous apprendrez comment créer et déployer des subgraphs pour indexer la blockchain Arweave. @@ -13,49 +13,49 @@ Arweave est un protocole qui permet aux développeurs de stocker des données de Arweave a déjà construit de nombreuses bibliothèques pour intégrer le protocole dans plusieurs langages de programmation différents. Pour plus d'informations, vous pouvez consulter : - [Arwiki](https://arwiki.wiki/#/en/main) -- [Arweave Resources](https://www.arweave.org/build) +- [Ressources Arweave](https://www.arweave.org/build) ## À quoi servent les subgraphs d'Arweave ? -The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are used to tell indexers (server operators) which data to index on a blockchain and save on their servers in order for you to be able to query it at any time using [GraphQL](https://graphql.org/). +The Graph vous permet de créer des API ouvertes personnalisées appelées "Subgraphs". Les subgraphs sont utilisés pour indiquer aux Indexeurs (opérateurs de serveur) quelles données indexer sur une blockchain et enregistrer sur leurs serveurs afin que vous puissiez les interroger à tout moment à l'aide de [GraphQL](https://graphql.org/). -[Graph Node](https://github.com/graphprotocol/graph-node) is now able to index data on Arweave protocol. The current integration is only indexing Arweave as a blockchain (blocks and transactions), it is not indexing the stored files yet. +[Graph Node](https://github.com/graphprotocol/graph-node) est désormais capable d'indexer les données sur le protocole Arweave. L'intégration actuelle indexe uniquement Arweave en tant que blockchain (blocs et transactions), elle n'indexe pas encore les fichiers stockés. ## Construire un subgraph Arweave Pour pouvoir créer et déployer des Arweave Subgraphs, vous avez besoin de deux packages : -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. 
[Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Caractéristique des subgraphs -Il y a trois composants d'un subgraph : +There are three components of a Subgraph: -### 1. Manifest - `subgraph.yaml` +### 1. Le Manifest - `subgraph.yaml` Définit les sources de données intéressantes et la manière dont elles doivent être traitées. Arweave est un nouveau type de source de données. -### 2. Schema - `schema.graphql` +### 2. Schéma - `schema.graphql` Vous définissez ici les données que vous souhaitez pouvoir interroger après avoir indexé votre subgraph à l'aide de GraphQL. Ceci est en fait similaire à un modèle pour une API, où le modèle définit la structure d'un corps de requête. -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). -### 3. AssemblyScript Mappings - `mapping.ts` +### 3. Mappages en AssemblyScript - `mapping.ts` Il s'agit de la logique qui détermine comment les données doivent être récupérées et stockées lorsqu'une personne interagit avec les sources de données que vous interrogez. Les données sont traduites et stockées sur la base du schema que vous avez répertorié. -Lors du développement du subgraph, il y a deux commandes clés : +During Subgraph development there are two key commands: ``` -$ graph codegen # génère des types à partir du fichier de schéma identifié dans le manifeste -$ graph build # génère le Web Assembly à partir des fichiers AssemblyScript, et prépare tous les fichiers de subgraphes dans un dossier /build +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## Définition du manifeste du subgraph -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,30 +82,30 @@ dataSources: - handler: handleTx # le nom de la fonction dans le fichier de mapping ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) -- The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` +- Arweave Subgraphs introduce a new kind of data source (`arweave`) +- Le réseau doit correspondre à un réseau sur le Graph Node hôte. Dans Subgraph Studio, le réseau principal d'Arweave est `arweave-mainnet` - Les sources de données Arweave introduisent un champ source.owner facultatif, qui est la clé publique d'un portefeuille Arweave Les sources de données Arweave prennent en charge deux types de gestionnaires : -- `blockHandlers` - Run on every new Arweave block. No source.owner is required. 
-- `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` +- `blockHandlers` - Exécuté sur chaque nouveau bloc Arweave. Aucun source.owner n'est requis. +- `transactionHandlers` - Exécute chaque transaction dont le propriétaire est `source.owner` de la source de données. Actuellement, un propriétaire est requis pour `transactionHandlers`, si les utilisateurs veulent traiter toutes les transactions, ils doivent fournir "" comme `source.owner` > Source.owner peut être l’adresse du propriétaire ou sa clé publique. > > Les transactions sont les éléments constitutifs du permaweb Arweave et ce sont des objets créés par les utilisateurs finaux. > -> Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. +> Note : Les transactions [Irys (anciennement Bundlr)](https://irys.xyz/) ne sont pas encore prises en charge. ## Définition de schéma -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## Cartographies AssemblyScript -The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). +Les gestionnaires d'événements sont écrits en [AssemblyScript](https://www.assemblyscript.org/). -Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). +L'indexation Arweave introduit des types de données spécifiques à Arweave dans l'[API AssemblyScript](/subgraphs/developing/creating/graph-ts/api/). ```tsx class Block { @@ -146,39 +146,39 @@ class Transaction { } ``` -Block handlers receive a `Block`, while transactions receive a `Transaction`. +Les gestionnaires de blocs reçoivent un `Block`, tandis que les transactions reçoivent un `Transaction`. -Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). +L'écriture des mappages d'un subgraph Arweave est très similaire à l'écriture des mappages d'un subgraph Ethereum. Pour plus d'informations, cliquez [ici](/developing/creating-a-subgraph/#writing-mappings). ## Déploiement d'un subgraph Arweave dans Subgraph Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash -graph deploy --access-token +graph deploy --access-token ``` ## Interroger un subgraph d'Arweave -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. 
+The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Exemples de subgraphs -Voici un exemple de modèle subgraph : +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### Un subgraph peut-il indexer Arweave et d'autres chaînes ? +### Can a Subgraph index Arweave and other chains? -Non, un subgraph ne peut supporter que les sources de données d'une seule chaîne/réseau. +No, a Subgraph can only support data sources from one chain/network. ### Puis-je indexer les fichiers enregistrés sur Arweave ? Actuellement, The Graph n'indexe Arweave qu'en tant que blockchain (ses blocs et ses transactions). -### Puis-je identifier les bundles de Bundlr dans mon subgraph ? +### Can I identify Bundlr bundles in my Subgraph? Cette fonction n'est pas prise en charge actuellement. @@ -188,9 +188,9 @@ La source.owner peut être la clé publique de l'utilisateur ou l'adresse de son ### Quel est le format de chiffrement actuel ? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). 
-The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: +La fonction d'assistant `bytesToBase64(bytes : Uint8Array, urlSafe : boolean) : string` suivante peut être utilisée, et sera ajoutée à `graph-ts` : ``` const base64Alphabet = [ @@ -219,14 +219,14 @@ function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; result += alphabet[bytes[i] & 0x3F]; } - if (i === l + 1) { // 1 octet yet to write + if (i === l + 1) { // 1 octet à écrire result += alphabet[bytes[i - 2] >> 2]; result += alphabet[(bytes[i - 2] & 0x03) << 4]; if (!urlSafe) { result += "=="; } } - if (!urlSafe && i === l) { // 2 octets yet to write + if (!urlSafe && i === l) { // 2 octets à écrire result += alphabet[bytes[i - 2] >> 2]; result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; result += alphabet[(bytes[i - 1] & 0x0F) << 2]; From 61669c57d677a0a038623d8056ac3e61a40e88ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:23 -0500 Subject: [PATCH 0562/1789] New translations arweave.mdx (Spanish) --- .../pages/es/subgraphs/cookbook/arweave.mdx | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/website/src/pages/es/subgraphs/cookbook/arweave.mdx b/website/src/pages/es/subgraphs/cookbook/arweave.mdx index c0333e3dadf8..31f86a2aa56f 100644 --- a/website/src/pages/es/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/es/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Construyendo Subgrafos en Arweave --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! En esta guía, aprenderás a construir y deployar subgrafos para indexar la blockchain de Arweave. @@ -25,12 +25,12 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are Para poder construir y deployar subgrafos Arweave, necesita dos paquetes: -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Componentes del subgrafo -Hay tres componentes de un subgrafo: +There are three components of a Subgraph: ### 1. Manifest - `subgraph.yaml` @@ -40,22 +40,22 @@ Define las fuentes de datos de interés y cómo deben ser procesadas. Arweave es Aquí defines qué datos quieres poder consultar después de indexar tu Subgrafo usando GraphQL. 
Esto es en realidad similar a un modelo para una API, donde el modelo define la estructura de un cuerpo de solicitud. -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Mappings - `mapping.ts` Esta es la lógica que determina cómo los datos deben ser recuperados y almacenados cuando alguien interactúa con las fuentes de datos que estás escuchando. Los datos se traducen y se almacenan basándose en el esquema que has listado. -Durante el desarrollo del subgrafo hay dos comandos clave: +During Subgraph development there are two key commands: ``` -$ graph codegen # genera tipos a partir del archivo de esquema identificado en el manifiesto -$ graph build # genera Web Assembly a partir de los archivos de AssemblyScript y prepara todos los archivos de subgrafo en una carpeta /build +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## Definición de manifiesto del subgrafo -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Las fuentes de datos de Arweave introducen un campo opcional "source.owner", que es la clave pública de una billetera Arweave @@ -99,7 +99,7 @@ Las fuentes de datos de Arweave admiten dos tipos de handlers: ## Definición de esquema -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## Asignaciones de AssemblyScript @@ -152,7 +152,7 @@ Writing the mappings of an Arweave Subgraph is very similar to writing the mappi ## Deploying an Arweave Subgraph in Subgraph Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. 
```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## Consultando un subgrafo de Arweave -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Subgrafos de ejemplo -A continuación se muestra un ejemplo de subgrafo como referencia: +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### ¿Puede un subgrafo indexar Arweave y otras cadenas? +### Can a Subgraph index Arweave and other chains? -No, un subgrafo sólo puede admitir fuentes de datos de una cadena/red. +No, a Subgraph can only support data sources from one chain/network. ### ¿Puedo indexar los archivos almacenados en Arweave? Actualmente, The Graph sólo indexa Arweave como blockchain (sus bloques y transacciones). -### ¿Puedo identificar los paquetes de Bundlr en mi subgrafo? +### Can I identify Bundlr bundles in my Subgraph? Actualmente no se admite. @@ -188,7 +188,7 @@ El source.owner puede ser la clave pública del usuario o la dirección de la cu ### ¿Cuál es el formato actual de encriptación? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: From d91e015b302781bf758d2cc395556409c2ca6393 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:24 -0500 Subject: [PATCH 0563/1789] New translations arweave.mdx (Arabic) --- .../pages/ar/subgraphs/cookbook/arweave.mdx | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/website/src/pages/ar/subgraphs/cookbook/arweave.mdx b/website/src/pages/ar/subgraphs/cookbook/arweave.mdx index c1ec421993b4..e042a1dd2f4c 100644 --- a/website/src/pages/ar/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! 
In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -25,12 +25,12 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are To be able to build and deploy Arweave Subgraphs, you need two packages: -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Subgraph's components -There are three components of a subgraph: +There are three components of a Subgraph: ### 1. Manifest - `subgraph.yaml` @@ -40,22 +40,22 @@ Defines the data sources of interest, and how they should be processed. Arweave Here you define which data you want to be able to query after indexing your Subgraph using GraphQL. This is actually similar to a model for an API, where the model defines the structure of a request body. -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Mappings - `mapping.ts` This is the logic that determines how data should be retrieved and stored when someone interacts with the data sources you are listening to. The data gets translated and is stored based off the schema you have listed. -During subgraph development there are two key commands: +During Subgraph development there are two key commands: ``` $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## تعريف Subgraph Manifest -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. 
In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet @@ -99,7 +99,7 @@ Arweave data sources support two types of handlers: ## تعريف المخطط -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript Mappings @@ -152,7 +152,7 @@ Writing the mappings of an Arweave Subgraph is very similar to writing the mappi ## Deploying an Arweave Subgraph in Subgraph Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## Querying an Arweave Subgraph -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## أمثلة على الـ Subgraphs -Here is an example subgraph for reference: +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### Can a subgraph index Arweave and other chains? +### Can a Subgraph index Arweave and other chains? -No, a subgraph can only support data sources from one chain/network. +No, a Subgraph can only support data sources from one chain/network. ### Can I index the stored files on Arweave? Currently, The Graph is only indexing Arweave as a blockchain (its blocks and transactions). -### Can I identify Bundlr bundles in my subgraph? +### Can I identify Bundlr bundles in my Subgraph? This is not currently supported. @@ -188,7 +188,7 @@ The source.owner can be the user's public key or account address. ### What is the current encryption format? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). 
The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: From 228d701f75bed37ef4d400730e8f1df3914e729e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:25 -0500 Subject: [PATCH 0564/1789] New translations arweave.mdx (Czech) --- .../pages/cs/subgraphs/cookbook/arweave.mdx | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/arweave.mdx b/website/src/pages/cs/subgraphs/cookbook/arweave.mdx index d59897ad4e03..dd5560479104 100644 --- a/website/src/pages/cs/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Vytváření podgrafů na Arweave --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! V této příručce se dozvíte, jak vytvořit a nasadit subgrafy pro indexování blockchainu Arweave. @@ -25,12 +25,12 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are Abyste mohli sestavit a nasadit Arweave Subgraphs, potřebujete dva balíčky: -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Komponenty podgrafu -Podgraf má tři Komponenty: +There are three components of a Subgraph: ### 1. Manifest - `subgraph.yaml` @@ -40,22 +40,22 @@ Definuje zdroje dat, které jsou předmětem zájmu, a způsob jejich zpracován Zde definujete, na která data se chcete po indexování subgrafu pomocí jazyka GraphQL dotazovat. Je to vlastně podobné modelu pro API, kde model definuje strukturu těla požadavku. -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Mappings - `mapping.ts` Jedná se o logiku, která určuje, jak mají být data načtena a uložena, když někdo komunikuje se zdroji dat, kterým nasloucháte. Data se přeloží a uloží na základě schématu, které jste uvedli. 
-Při vývoji podgrafů existují dva klíčové příkazy: +During Subgraph development there are two key commands: ``` -$ graph codegen # generuje typy ze souboru se schématem identifikovaným v manifestu -$ graph build # vygeneruje webové sestavení ze souborů AssemblyScript a připraví všechny dílčí soubory do složky /build +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## Definice podgrafu Manifest -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Zdroje dat Arweave obsahují nepovinné pole source.owner, což je veřejný klíč peněženky Arweave @@ -99,7 +99,7 @@ Datové zdroje Arweave podporují dva typy zpracovatelů: ## Definice schématu -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript Mapování @@ -152,7 +152,7 @@ Writing the mappings of an Arweave Subgraph is very similar to writing the mappi ## Nasazení podgrafu Arweave v Podgraf Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## Dotazování podgrafu Arweave -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. 
## Příklady podgrafů -Zde je příklad podgrafu pro referenci: +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### Může podgraf indexovat Arweave a další řetězce? +### Can a Subgraph index Arweave and other chains? -Ne, podgraf může podporovat zdroje dat pouze z jednoho řetězce/sítě. +No, a Subgraph can only support data sources from one chain/network. ### Mohu indexovat uložené soubory v Arweave? V současné době The Graph indexuje pouze Arweave jako blockchain (jeho bloky a transakce). -### Mohu identifikovat svazky Bundlr ve svém podgrafu? +### Can I identify Bundlr bundles in my Subgraph? Toto není aktuálně podporováno. @@ -188,7 +188,7 @@ Source.owner může být veřejný klíč uživatele nebo adresa účtu. ### Jaký je aktuální formát šifrování? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: From 36ee364504e38b2cf0e4894a0d4c8473eab6206e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:26 -0500 Subject: [PATCH 0565/1789] New translations arweave.mdx (German) --- .../pages/de/subgraphs/cookbook/arweave.mdx | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/website/src/pages/de/subgraphs/cookbook/arweave.mdx b/website/src/pages/de/subgraphs/cookbook/arweave.mdx index 02dd4f8398fc..49469572ced0 100644 --- a/website/src/pages/de/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/de/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -25,12 +25,12 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are To be able to build and deploy Arweave Subgraphs, you need two packages: -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. 
[Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Subgraph's components -There are three components of a subgraph: +There are three components of a Subgraph: ### 1. Manifest - `subgraph.yaml` @@ -40,22 +40,22 @@ Defines the data sources of interest, and how they should be processed. Arweave Here you define which data you want to be able to query after indexing your Subgraph using GraphQL. This is actually similar to a model for an API, where the model defines the structure of a request body. -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Mappings - `mapping.ts` This is the logic that determines how data should be retrieved and stored when someone interacts with the data sources you are listening to. The data gets translated and is stored based off the schema you have listed. -During subgraph development there are two key commands: +During Subgraph development there are two key commands: ``` $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## Subgraf-Manifest-Definition -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet @@ -99,7 +99,7 @@ Arweave data sources support two types of handlers: ## Schema-Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. 
There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript-Mappings @@ -152,7 +152,7 @@ Writing the mappings of an Arweave Subgraph is very similar to writing the mappi ## Deploying an Arweave Subgraph in Subgraph Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## Querying an Arweave Subgraph -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Beispiele von Subgrafen -Here is an example subgraph for reference: +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### Can a subgraph index Arweave and other chains? +### Can a Subgraph index Arweave and other chains? -No, a subgraph can only support data sources from one chain/network. +No, a Subgraph can only support data sources from one chain/network. ### Can I index the stored files on Arweave? Currently, The Graph is only indexing Arweave as a blockchain (its blocks and transactions). -### Can I identify Bundlr bundles in my subgraph? +### Can I identify Bundlr bundles in my Subgraph? This is not currently supported. @@ -188,7 +188,7 @@ The source.owner can be the user's public key or account address. ### What is the current encryption format? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). 
The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: From ac4a78cf62e7940b60565911e1e74cdd68d0d115 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:27 -0500 Subject: [PATCH 0566/1789] New translations arweave.mdx (Italian) --- .../pages/it/subgraphs/cookbook/arweave.mdx | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/website/src/pages/it/subgraphs/cookbook/arweave.mdx b/website/src/pages/it/subgraphs/cookbook/arweave.mdx index 2372025621d1..18b485a9c382 100644 --- a/website/src/pages/it/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/it/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -25,12 +25,12 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are To be able to build and deploy Arweave Subgraphs, you need two packages: -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Subgraph's components -There are three components of a subgraph: +There are three components of a Subgraph: ### 1. Manifest - `subgraph.yaml` @@ -40,22 +40,22 @@ Defines the data sources of interest, and how they should be processed. Arweave Here you define which data you want to be able to query after indexing your Subgraph using GraphQL. This is actually similar to a model for an API, where the model defines the structure of a request body. -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Mappings - `mapping.ts` This is the logic that determines how data should be retrieved and stored when someone interacts with the data sources you are listening to. The data gets translated and is stored based off the schema you have listed. 
-During subgraph development there are two key commands: +During Subgraph development there are two key commands: ``` $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## Subgraph Manifest Definition -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet @@ -99,7 +99,7 @@ Arweave data sources support two types of handlers: ## Schema Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript Mappings @@ -152,7 +152,7 @@ Writing the mappings of an Arweave Subgraph is very similar to writing the mappi ## Deploying an Arweave Subgraph in Subgraph Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## Querying an Arweave Subgraph -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Example Subgraphs -Here is an example subgraph for reference: +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### Can a subgraph index Arweave and other chains? 
+### Can a Subgraph index Arweave and other chains? -No, a subgraph can only support data sources from one chain/network. +No, a Subgraph can only support data sources from one chain/network. ### Can I index the stored files on Arweave? Currently, The Graph is only indexing Arweave as a blockchain (its blocks and transactions). -### Can I identify Bundlr bundles in my subgraph? +### Can I identify Bundlr bundles in my Subgraph? This is not currently supported. @@ -188,7 +188,7 @@ The source.owner can be the user's public key or account address. ### What is the current encryption format? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: From 818c74eab64a259906e23c688d10e33e70aaadce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:28 -0500 Subject: [PATCH 0567/1789] New translations arweave.mdx (Japanese) --- .../pages/ja/subgraphs/cookbook/arweave.mdx | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/website/src/pages/ja/subgraphs/cookbook/arweave.mdx b/website/src/pages/ja/subgraphs/cookbook/arweave.mdx index b834f96b5cb9..6146ab42c7c6 100644 --- a/website/src/pages/ja/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Arweaveでのサブグラフ構築 --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! このガイドでは、Arweaveブロックチェーンのインデックスを作成するためのサブグラフの構築とデプロイ方法について学びます。 @@ -25,12 +25,12 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are Arweaveのサブグラフを構築し展開できるようにするためには、2つのパッケージが必要です。 -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. 
## サブグラフのコンポーネント -サブグラフには3つの構成要素があります: +There are three components of a Subgraph: ### 1. Manifest - `subgraph.yaml` @@ -40,22 +40,22 @@ Arweaveのサブグラフを構築し展開できるようにするためには ここでは、GraphQL を使用してサブグラフにインデックスを付けた後にクエリできるようにするデータを定義します。これは実際には API のモデルに似ており、モデルはリクエスト本文の構造を定義します。 -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Mappings - `mapping.ts` これは、リスニングしているデータソースと誰かがやりとりするときに、データをどのように取得し、保存するかを決定するロジックです。データは変換され、あなたがリストアップしたスキーマに基づいて保存されます。 -サブグラフの開発には 2 つの重要なコマンドがあります: +During Subgraph development there are two key commands: ``` -$ graph codegen # マニフェストで識別されたようにファイルから型を生成します -$ グラフ ビルド # AssemblyScript ファイルから Web アセンブリを生成し、/build フォルダにすべてのサブグラフ ファイルを準備します +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## サブグラフマニフェストの定義 -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave データ ソースには、オプションの source.owner フィールドが導入されています。これは、Arweave ウォレットの公開鍵です。 @@ -99,7 +99,7 @@ Arweaveデータソースは 2 種類のハンドラーをサポートしてい ## スキーマ定義 -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript マッピング @@ -152,7 +152,7 @@ Writing the mappings of an Arweave Subgraph is very similar to writing the mappi ## Deploying an Arweave Subgraph in Subgraph Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## Arweaveサブグラフのクエリ -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. 
+The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## サブグラフの例 -参考までにサブグラフの例を紹介します: +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### サブグラフは Arweave やその他のチェーンにインデックスを付けることができますか? +### Can a Subgraph index Arweave and other chains? -いいえ、サブグラフは 1 つのチェーン/ネットワークのデータソースのみをサポートします。 +No, a Subgraph can only support data sources from one chain/network. ### 保存されたファイルをArweaveでインデックス化することはできますか? 現在、The Graph は Arweave をブロックチェーン (ブロックとトランザクション) としてのみインデックス化しています。 -### 自分のサブグラフにあるBundlrバンドルは特定できるのか? +### Can I identify Bundlr bundles in my Subgraph? 現在はサポートされていません。 @@ -188,7 +188,7 @@ Source.ownerには、ユーザの公開鍵またはアカウントアドレス ### 現在の暗号化フォーマットは? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: From 2ad19670489358cb75c80cbcf45004eb83f21d4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:29 -0500 Subject: [PATCH 0568/1789] New translations arweave.mdx (Korean) --- .../pages/ko/subgraphs/cookbook/arweave.mdx | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/website/src/pages/ko/subgraphs/cookbook/arweave.mdx b/website/src/pages/ko/subgraphs/cookbook/arweave.mdx index 2372025621d1..18b485a9c382 100644 --- a/website/src/pages/ko/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/ko/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -25,12 +25,12 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are To be able to build and deploy Arweave Subgraphs, you need two packages: -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. 
[Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Subgraph's components -There are three components of a subgraph: +There are three components of a Subgraph: ### 1. Manifest - `subgraph.yaml` @@ -40,22 +40,22 @@ Defines the data sources of interest, and how they should be processed. Arweave Here you define which data you want to be able to query after indexing your Subgraph using GraphQL. This is actually similar to a model for an API, where the model defines the structure of a request body. -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Mappings - `mapping.ts` This is the logic that determines how data should be retrieved and stored when someone interacts with the data sources you are listening to. The data gets translated and is stored based off the schema you have listed. -During subgraph development there are two key commands: +During Subgraph development there are two key commands: ``` $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## Subgraph Manifest Definition -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet @@ -99,7 +99,7 @@ Arweave data sources support two types of handlers: ## Schema Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. 
There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript Mappings @@ -152,7 +152,7 @@ Writing the mappings of an Arweave Subgraph is very similar to writing the mappi ## Deploying an Arweave Subgraph in Subgraph Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## Querying an Arweave Subgraph -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Example Subgraphs -Here is an example subgraph for reference: +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### Can a subgraph index Arweave and other chains? +### Can a Subgraph index Arweave and other chains? -No, a subgraph can only support data sources from one chain/network. +No, a Subgraph can only support data sources from one chain/network. ### Can I index the stored files on Arweave? Currently, The Graph is only indexing Arweave as a blockchain (its blocks and transactions). -### Can I identify Bundlr bundles in my subgraph? +### Can I identify Bundlr bundles in my Subgraph? This is not currently supported. @@ -188,7 +188,7 @@ The source.owner can be the user's public key or account address. ### What is the current encryption format? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). 
The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: From a81be07f84c1c480d40aea91426637b564f5fca7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:29 -0500 Subject: [PATCH 0569/1789] New translations arweave.mdx (Dutch) --- .../pages/nl/subgraphs/cookbook/arweave.mdx | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/website/src/pages/nl/subgraphs/cookbook/arweave.mdx b/website/src/pages/nl/subgraphs/cookbook/arweave.mdx index 1ff7fdd460fc..aeab73e164bd 100644 --- a/website/src/pages/nl/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/nl/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Bouwen van Subgraphs op Arweave --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! In deze gids, zul je leren hoe je Subgraphs bouwt en implementeer om de Arweave blockchain te indexeren. @@ -25,12 +25,12 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are Voor het kunnen bouwen en implementeren van Arweave Subgraphs, heb je twee paketten nodig: -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Subgraph's componenten -Er zijn drie componenten van een subgraph: +There are three components of a Subgraph: ### 1. Manifest - `subgraph.yaml` @@ -40,22 +40,22 @@ Definieert gegevensbronnen die van belang zijn en hoe deze verwerkt moeten worde Hier definieer je welke gegevens je wilt kunnen opvragen na het indexeren van je subgraph door het gebruik van GraphQL. Dit lijkt eigenlijk op een model voor een API, waarbij het model de structuur van een verzoek definieert. -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Mappings - `mapping.ts` Dit is de logica die definieert hoe data zou moeten worden opgevraagd en opgeslagen wanneer iemand met de gegevens communiceert waarnaar jij aan het luisteren bent. De gegevens worden vertaald en is opgeslagen gebaseerd op het schema die je genoteerd hebt. 
-Tijdens subgraph ontwikkeling zijn er twee belangrijke commando's: +During Subgraph development there are two key commands: ``` -$ graph codegen # genereert types van het schema bestand die geïdentificeerd is in het manifest -$ graph build # genereert Web Assembly vanuit de AssemblyScript-bestanden, en bereidt alle Subgraph-bestanden voor in een /build map +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## Subgraph Manifest Definition -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersie: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - afhandelaar: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave data bronnen introduceert een optionele bron.eigenaar veld, dat de openbare sleutel is van een Arweave wallet @@ -99,7 +99,7 @@ Arweave data bronnen ondersteunt twee typen verwerkers: ## Schema Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript Mappings @@ -152,7 +152,7 @@ Writing the mappings of an Arweave Subgraph is very similar to writing the mappi ## Deploying an Arweave Subgraph in Subgraph Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## Querying an Arweave Subgraph -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. 
## Example Subgraphs -Here is an example subgraph for reference: +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### Can a subgraph index Arweave and other chains? +### Can a Subgraph index Arweave and other chains? -No, a subgraph can only support data sources from one chain/network. +No, a Subgraph can only support data sources from one chain/network. ### Can I index the stored files on Arweave? Currently, The Graph is only indexing Arweave as a blockchain (its blocks and transactions). -### Can I identify Bundlr bundles in my subgraph? +### Can I identify Bundlr bundles in my Subgraph? This is not currently supported. @@ -188,7 +188,7 @@ The source.owner can be the user's public key or account address. ### What is the current encryption format? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: From 15640acbc98c7202d27c08f523864d976140d743 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:30 -0500 Subject: [PATCH 0570/1789] New translations arweave.mdx (Polish) --- .../pages/pl/subgraphs/cookbook/arweave.mdx | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/website/src/pages/pl/subgraphs/cookbook/arweave.mdx b/website/src/pages/pl/subgraphs/cookbook/arweave.mdx index 2372025621d1..18b485a9c382 100644 --- a/website/src/pages/pl/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/pl/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -25,12 +25,12 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are To be able to build and deploy Arweave Subgraphs, you need two packages: -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. 
[Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Subgraph's components -There are three components of a subgraph: +There are three components of a Subgraph: ### 1. Manifest - `subgraph.yaml` @@ -40,22 +40,22 @@ Defines the data sources of interest, and how they should be processed. Arweave Here you define which data you want to be able to query after indexing your Subgraph using GraphQL. This is actually similar to a model for an API, where the model defines the structure of a request body. -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Mappings - `mapping.ts` This is the logic that determines how data should be retrieved and stored when someone interacts with the data sources you are listening to. The data gets translated and is stored based off the schema you have listed. -During subgraph development there are two key commands: +During Subgraph development there are two key commands: ``` $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## Subgraph Manifest Definition -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet @@ -99,7 +99,7 @@ Arweave data sources support two types of handlers: ## Schema Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. 
There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript Mappings @@ -152,7 +152,7 @@ Writing the mappings of an Arweave Subgraph is very similar to writing the mappi ## Deploying an Arweave Subgraph in Subgraph Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## Querying an Arweave Subgraph -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Example Subgraphs -Here is an example subgraph for reference: +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### Can a subgraph index Arweave and other chains? +### Can a Subgraph index Arweave and other chains? -No, a subgraph can only support data sources from one chain/network. +No, a Subgraph can only support data sources from one chain/network. ### Can I index the stored files on Arweave? Currently, The Graph is only indexing Arweave as a blockchain (its blocks and transactions). -### Can I identify Bundlr bundles in my subgraph? +### Can I identify Bundlr bundles in my Subgraph? This is not currently supported. @@ -188,7 +188,7 @@ The source.owner can be the user's public key or account address. ### What is the current encryption format? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). 
The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: From cc1463bb6393912af19bb1f26b3a6afa0cd99f7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:32 -0500 Subject: [PATCH 0571/1789] New translations arweave.mdx (Portuguese) --- .../pages/pt/subgraphs/cookbook/arweave.mdx | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/arweave.mdx b/website/src/pages/pt/subgraphs/cookbook/arweave.mdx index a84800d73d48..4904f046e479 100644 --- a/website/src/pages/pt/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Construindo Subgraphs no Arweave --- -> O apoio ao Arweave no Graph Node, e no Subgraph Studio, está em beta: por favor nos contacte no [Discord](https://discord.gg/graphprotocol) se tiver dúvidas sobre como construir subgraphs no Arweave! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! Neste guia, você aprenderá como construir e lançar Subgraphs para indexar a blockchain Arweave. @@ -25,12 +25,12 @@ O [Graph Node](https://github.com/graphprotocol/graph-node) é atualmente capaz Para construir e lançar Subgraphs no Arweave, são necessários dois pacotes: -1. `@graphprotocol/graph-cli` acima da versão 0.30.2 — Esta é uma ferramenta de linha de comandos para a construção e implantação de subgraphs. [Clique aqui](https://www.npmjs.com/package/@graphprotocol/graph-cli) para baixá-la usando o `npm`. -2. `@graphprotocol/graph-ts` acima da versão 0.27.0 — Esta é uma ferramenta de linha de comandos para a construção e implantação de subgraphs. [Clique aqui](https://www.npmjs.com/package/@graphprotocol/graph-ts) para baixá-la usando o `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Os componentes de um subgraph -Um subgraph tem três componentes: +There are three components of a Subgraph: ### 1. Manifest - `subgraph.yaml` @@ -40,22 +40,22 @@ Define as fontes de dados de interesse, e como elas devem ser processadas. O Arw Aqui é possível definir quais dados queres consultar após indexar o seu subgraph utilizando o GraphQL. Isto é como um modelo para uma API, onde o modelo define a estrutura de um órgão de requisito. -Os requisitos para subgraphs do Arweave estão cobertos pela [documentação](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. Mapeamentos de AssemblyScript - `mapping.ts` Esta é a lógica que determina como os dados devem ser retirados e armazenados quando alguém interage com as fontes de dados que estás a escutar. Os dados são traduzidos e armazenados baseados no schema que listaste. 
-Durante o desenvolvimento de um subgraph, existem dois comandos importantes: +During Subgraph development there are two key commands: ``` -$ graph codegen # gera tipos do arquivo de schema identificado no manifest -$ graph build # gera Web Assembly dos arquivos AssemblyScript, e prepara todos os arquivos do subgraph em uma pasta /build +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## Definição de Manifest de Subgraph -O manifest do subgraph `subgraph.yaml` identifica as fontes de dados para o subgraph, os gatilhos de interesse, e as funções que devem ser executadas em resposta a tais gatilhos. Veja abaixo um exemplo de um manifest de subgraph, para um subgraph no Arweave: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # o nome da função no arquivo de mapeamento ``` -- Subgraphs no Arweave introduzem uma nova categoria de fonte de dados (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - A rede deve corresponder a uma rede no Graph Node que a hospeda. No Subgraph Studio, a mainnet do Arweave é `arweave-mainnet` - Fontes de dados no Arweave introduzem um campo source.owner opcional, a chave pública de uma carteira no Arweave @@ -99,7 +99,7 @@ Fontes de dados no Arweave apoiam duas categorias de handlers: ## Definição de Schema -A definição de Schema descreve a estrutura do banco de dados resultado do subgraph, e os relacionamentos entre entidades. Isto é agnóstico da fonte de dados original. Para mais detalhes na definição de schema de subgraph, [clique aqui](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## Mapeamentos em AssemblyScript @@ -152,7 +152,7 @@ Escrever os mapeamentos de um Subgraph no Arweave é parecido com a escrita dos ## Como lançar um Subgraph no Arweave ao Subgraph Studio -Após criar o seu Subgraph no painel de controlo do Subgraph Studio, este pode ser implantado com o comando `graph deploy`. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## Consultando um Subgraph no Arweave -O ponto final do GraphQL para subgraphs no Arweave é determinado pela definição do schema, com a interface existente da API. Visite a [documentação da API da GraphQL](/subgraphs/querying/graphql-api/) para mais informações. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. 
 ## Exemplos de Subgraphs
-Aqui está um exemplo de subgraph para referência:
+Here is an example Subgraph for reference:
-- [Exemplo de subgraph para o Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions)
+- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions)
 ## FAQ
-### Um subgraph pode indexar o Arweave e outras chains?
+### Can a Subgraph index Arweave and other chains?
-Não, um subgraph só pode apoiar fontes de dados de apenas uma chain/rede.
+No, a Subgraph can only support data sources from one chain/network.
 ### Posso indexar os arquivos armazenados no Arweave?
 Atualmente, The Graph apenas indexa o Arweave como uma blockchain (seus blocos e transações).
-### Posso identificar pacotes do Bundlr em meu subgraph?
+### Can I identify Bundlr bundles in my Subgraph?
 Isto não é apoiado no momento.
@@ -188,7 +188,7 @@ O source.owner pode ser a chave pública ou o endereço da conta do
 ### Qual é o formato atual de encriptação?
-Os dados são geralmente passados aos mapeamentos como Bytes, que se armazenados diretamente, são retornados ao subgraph em um formato `hex` (por ex. hashes de transações e blocos). Você pode querer convertê-lo a um formato seguro para `base64` ou `base64 URL` em seus mapeamentos, para combinar com o que é exibido em exploradores de blocos, como o [Arweave Explorer](https://viewblock.io/arweave/).
+Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/).
 A seguinte função de helper `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` pode ser usada, e será adicionada ao `graph-ts`:
From b3bc823180b4efc65239364157971cd09053a2e0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Tue, 25 Feb 2025 17:18:32 -0500
Subject: [PATCH 0572/1789] New translations arweave.mdx (Russian)

---
 .../pages/ru/subgraphs/cookbook/arweave.mdx | 38 +++++++++----------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/website/src/pages/ru/subgraphs/cookbook/arweave.mdx b/website/src/pages/ru/subgraphs/cookbook/arweave.mdx
index a7f24e1bf79e..4a06fab5bfbd 100644
--- a/website/src/pages/ru/subgraphs/cookbook/arweave.mdx
+++ b/website/src/pages/ru/subgraphs/cookbook/arweave.mdx
@@ -2,7 +2,7 @@
 title: Создание Субграфов на Arweave
 ---
-> Поддержка Arweave в Graph Node и Subgraph Studio находится на стадии бета-тестирования. Если у Вас есть вопросы о создании субграфов Arweave, свяжитесь с нами в [Discord](https://discord.gg/graphprotocol)!
+> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs!
 Из этого руководства Вы узнаете, как создавать и развертывать субграфы для индексации блокчейна Arweave.
@@ -25,12 +25,12 @@ The Graph позволяет создавать собственные откр
 Чтобы иметь возможность создавать и развертывать Субграфы на Arweave, Вам понадобятся два пакета:
-1. `@graphprotocol/graph-cli` версии выше 0.30.2 — это инструмент командной строки для создания и развертывания субграфов. [Нажмите здесь](https://www.npmjs.com/package/@graphprotocol/graph-cli), чтобы скачать с помощью `npm`.
-2. `@graphprotocol/graph-ts` версии выше 0.27.0 — это библиотека типов, специфичных для субграфов. [Нажмите здесь](https://www.npmjs.com/package/@graphprotocol/graph-ts), чтобы скачать с помощью `npm`.
+1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`.
+2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`.
 ## Составляющие Субграфов
-Существует 3 компонента субграфа:
+There are three components of a Subgraph:
 ### 1. Манифест - `subgraph.yaml`
@@ -40,22 +40,22 @@ The Graph позволяет создавать собственные откр
 Здесь Вы определяете, какие данные хотите иметь возможность запрашивать после индексации своего субграфа с помощью GraphQL. На самом деле это похоже на модель для API, где модель определяет структуру тела запроса.
-Требования для субграфов Arweave описаны в [имеющейся документации](/developing/creating-a-subgraph/#the-graphql-schema).
+The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema).
 ### 3. Мэппинги на AssemblyScript - `mapping.ts`
 Это логика, которая определяет, как данные должны извлекаться и храниться, когда кто-то взаимодействует с источниками данных, которые Вы отслеживаете. Данные переводятся и сохраняются в соответствии с указанной Вами схемой.
-Во время разработки субграфа есть две ключевые команды:
+During Subgraph development there are two key commands:
 ```
-$ graph codegen # генерирует типы из файла схемы, указанного в манифесте
-$ graph build # генерирует Web Assembly из файлов AssemblyScript и подготавливает все файлы субграфа в папке /build
+$ graph codegen # generates types from the schema file identified in the manifest
+$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder
 ```
 ## Определение манифеста субграфа
-Манифест субграфа `subgraph.yaml` определяет источники данных для субграфа, триггеры, представляющие интерес, и функции, которые должны выполняться в ответ на эти триггеры. Ниже приведён пример манифеста субграфа для Arweave:
+The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph:
 ```yaml
 specVersion: 0.0.5
@@ -82,7 +82,7 @@ dataSources:
 - handler: handleTx # имя функции в файле мэппинга
 ```
-- Субграфы Arweave вводят новый тип источника данных (`arweave`)
+- Arweave Subgraphs introduce a new kind of data source (`arweave`)
 - Сеть должна соответствовать сети на размещенной Graph Node. В Subgraph Studio мейннет Arweave обозначается как `arweave-mainnet`
 - Источники данных Arweave содержат необязательное поле source.owner, которое является открытым ключом кошелька Arweave
@@ -99,7 +99,7 @@ dataSources:
 ## Определение схемы
-Определение схемы описывает структуру базы данных итогового субграфа и взаимосвязи между объектами. Это не зависит от исходного источника данных. Более подробную информацию об определении схемы субграфа можно найти [здесь](/developing/creating-a-subgraph/#the-graphql-schema).
+Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema).
 ## Мэппинги AssemblyScript
@@ -152,7 +152,7 @@ class Transaction {
 ## Развертывание субграфа Arweave в Subgraph Studio
-Как только Ваш субграф будет создан на панели управления Subgraph Studio, Вы можете развернуть его с помощью команды CLI `graph deploy`.
+Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command.
 ```bash
 graph deploy --access-token
@@ -160,25 +160,25 @@ graph deploy --access-token
 ## Запрос субграфа Arweave
-Конечная точка GraphQL для субграфов Arweave определяется схемой и существующим интерфейсом API. Для получения дополнительной информации ознакомьтесь с [документацией по API GraphQL](/subgraphs/querying/graphql-api/).
+The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information.
 ## Примеры субграфов
-Ниже приведен пример субграфа для справки:
+Here is an example Subgraph for reference:
-- [Пример субграфа для Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions)
+- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions)
 ## FAQ
-### Может ли субграф индексировать Arweave и другие чейны?
+### Can a Subgraph index Arweave and other chains?
-Нет, субграф может поддерживать источники данных только из одного чейна/сети.
+No, a Subgraph can only support data sources from one chain/network.
 ### Могу ли я проиндексировать сохраненные файлы в Arweave?
 В настоящее время The Graph индексирует Arweave только как блокчейн (его блоки и транзакции).
-### Могу ли я идентифицировать связки Bundle в своем субграфе?
+### Can I identify Bundlr bundles in my Subgraph?
 В настоящее время это не поддерживается.
@@ -188,7 +188,7 @@ Source.owner может быть открытым ключом пользова
 ### Каков текущий формат шифрования?
-Данные обычно передаются в мэппингах в виде байтов (Bytes), которые, если хранятся напрямую, возвращаются в субграф в формате `hex` (например, хэши блоков и транзакций). Вы можете захотеть преобразовать их в формат `base64` или `base64 URL`-безопасный в Ваших мэппингах, чтобы они соответствовали тому, что отображается в блок-обозревателях, таких как [Arweave Explorer](https://viewblock.io/arweave/).
+Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/).
 Следующая вспомогательная функция `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` может быть использована и будет добавлена в `graph-ts`:
From d718d0afcc27bb3bc26e562628b6a8a6e86fa70a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Tue, 25 Feb 2025 17:18:33 -0500
Subject: [PATCH 0573/1789] New translations arweave.mdx (Swedish)

---
 .../pages/sv/subgraphs/cookbook/arweave.mdx | 36 +++++++++----------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/website/src/pages/sv/subgraphs/cookbook/arweave.mdx b/website/src/pages/sv/subgraphs/cookbook/arweave.mdx
index 8a78a4ffa184..ee9e8aec229b 100644
--- a/website/src/pages/sv/subgraphs/cookbook/arweave.mdx
+++ b/website/src/pages/sv/subgraphs/cookbook/arweave.mdx
@@ -2,7 +2,7 @@
 title: Bygga subgrafer på Arweave
 ---
-> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs!
+> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs!
 I den här guiden kommer du att lära dig hur du bygger och distribuerar subgrafer för att indexera Weaver-blockkedjan.
@@ -25,12 +25,12 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are
 För att kunna bygga och distribuera Arweave Subgraphs behöver du två paket:
-1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`.
-2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`.
+1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`.
+2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`.
 ## Subgraphs komponenter
-Det finns tre komponenter i en subgraf:
+There are three components of a Subgraph:
 ### 1. Manifest - `subgraph.yaml`
@@ -40,22 +40,22 @@ Definierar datakällorna av intresse och hur de ska behandlas. Arweave är en ny
 Här definierar du vilken data du vill kunna fråga efter att du har indexerat din subgrafer med GraphQL. Detta liknar faktiskt en modell för ett API, där modellen definierar strukturen för en begäran.
-The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema).
+The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema).
 ### 3. AssemblyScript Mappings - `mapping.ts`
 Detta är logiken som avgör hur data ska hämtas och lagras när någon interagerar med datakällorna du lyssnar på. Data översätts och lagras utifrån det schema du har listat.
-Under subgrafutveckling finns det två nyckelkommandon: +During Subgraph development there are two key commands: ``` $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## Definition av subgraf manifestet -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave datakällor introducerar ett valfritt source.owner fält, som är den publika nyckeln till en Arweave plånbok @@ -99,7 +99,7 @@ Arweave datakällor stöder två typer av hanterare: ## Schema Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript mappningar @@ -152,7 +152,7 @@ Writing the mappings of an Arweave Subgraph is very similar to writing the mappi ## Deploying an Arweave Subgraph in Subgraph Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## Fråga efter en Arweave-subgraf -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. 
## Exempel på subgrafer -Här är ett exempel på subgraf som referens: +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### Kan en subgraf indexera Arweave och andra kedjor? +### Can a Subgraph index Arweave and other chains? -Nej, en subgraf kan bara stödja datakällor från en kedja/nätverk. +No, a Subgraph can only support data sources from one chain/network. ### Kan jag indexera de lagrade filerna på Arweave? För närvarande indexerar The Graph bara Arweave som en blockkedja (dess block och transaktioner). -### Kan jag identifiera Bundlr buntar i min subgraf? +### Can I identify Bundlr bundles in my Subgraph? Detta stöds inte för närvarande. @@ -188,7 +188,7 @@ Source.owner kan vara användarens publika nyckel eller kontoadress. ### Vad är det aktuella krypteringsformatet? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: From a9e3b5006db4d955b0cd9ce7e34c50a617320cb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:34 -0500 Subject: [PATCH 0574/1789] New translations arweave.mdx (Turkish) --- .../pages/tr/subgraphs/cookbook/arweave.mdx | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/website/src/pages/tr/subgraphs/cookbook/arweave.mdx b/website/src/pages/tr/subgraphs/cookbook/arweave.mdx index 8495fd68c39b..cbded541129c 100644 --- a/website/src/pages/tr/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/tr/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Arweave Üzerinde Subgraphlar Oluşturma --- -> Graph Düğümü ve Subgraph Studio'daki Arweave desteği beta aşamasındadır: Arweave subgraph'ları oluşturma konusunda herhangi bir sorunuz varsa lütfen [Discord](https://discord.gg/graphprotocol) üzerinden bizimle iletişime geçin! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! Bu rehberde, Arweave blok zincirini indekslemek için nasıl Subgraphs oluşturacağınızı ve dağıtacağınızı öğreneceksiniz. @@ -25,12 +25,12 @@ The Graph, "Subgraph" adı verilen özel açık API'ler oluşturmanıza olanak t Arweave Subgraphları oluşturabilmek ve dağıtabilmek için iki pakete ihtiyacınız vardır: -1. `@graphprotocol/graph-cli` 0.30.2 sürümünün üzerinde - Bu, subgraph'ler oluşturmak ve dağıtmak için kullanılan bir komut satırı aracıdır. [Buraya] (https://www.npmjs.com/package/@graphprotocol/graph-cli) tıklayarak npm kullanarak indirebilirsiniz. -2. 
`@graphprotocol/graph-ts` 0.27.0 sürümünün üzerinde - Bu, subgraph'e özgü türler içeren bir kütüphanedir. [Buraya] (https://www.npmjs.com/package/@graphprotocol/graph-ts) tıklayarak npm kullanarak indirebilirsiniz. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Subgraph'ın bileşenleri -Bir subgraph'ın üç bileşeni vardır: +There are three components of a Subgraph: ### 1. Manifesto - `subgraph.yaml` @@ -40,22 +40,22 @@ Bir subgraph'ın üç bileşeni vardır: Burada, GraphQL kullanarak Subgraph'ınızı indeksledikten sonra hangi verileri sorgulayabilmek istediğinizi tanımlarsınız. Bu aslında, modelin bir istek gövdesinin yapısını tanımladığı bir API modeline benzer. -Arweave subgraph'leri için gereksinimler [mevcut dokümanlarda](/developing/creating-a-subgraph/#the-graphql-schema) ele alınmıştır. +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Eşlemeleri - `mapping.ts` Bu, birisi sizin etkinliklerini gözlemlediğiniz veri kaynaklarıyla etkileşimde bulunduğunda verinin nasıl alınması ve depolanması gerektiğini belirleyen mantıktır. Veri çevrilir ve belirttiğiniz şemaya göre depolanır. -Subgraph geliştirme sırasında iki anahtar komut vardır: +During Subgraph development there are two key commands: ``` -$ graph codegen # manifest'de tanımlanan şema dosyasından tipleri üretir -$ graph build # AssemblyScript dosyalarından Web Assembly oluşturur ve tüm subgraph dosyalarını bir /build klasöründe hazırlar +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## Subgraph Manifest Tanımı -Subgraph manifestosu subgraph.yaml; subgraph'in veri kaynaklarını, ilgili tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken fonksiyonları tanımlar. Aşağıda, bir Arweave subgraph'i için örnek bir subgraph manifestosu bulunmaktadır: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # eşleştirme dosyasındaki fonksiyon adı ``` -- Arweave subgraph'leri, yeni bir veri kaynağı türü (`arweave`) sunar +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - Ağ, sağlayıcı Graph Düğümü üzerindeki bir ağa karşılık gelmelidir. Subgraph Studio'da, Arweave'in ana ağı `arweave-mainnet` olarak tanımlanır - Arweave veri kaynakları, bir Arweave cüzdanının genel anahtarı olan opsiyonel bir source.owner alanı sunar @@ -99,7 +99,7 @@ Arweave veri kaynakları iki tür işleyiciyi destekler: ## Şema Tanımı -Şema tanımı, oluşan subgraph veritabanının yapısını ve varlıklar arasındaki ilişkileri tanımlar. Bu ilişki orijinal veri kaynağından bağımsızdır. Subgraph şema tanımı hakkında daha fazla detay [burada](/developing/creating-a-subgraph/#the-graphql-schema) bulunmaktadır. 
+Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript Eşlemeleri @@ -152,7 +152,7 @@ Arweave Subgraph'inin eşleştirmelerini yazmak, bir Ethereum Subgraph'inin eşl ## Subgraph Studio'da Arweave Subgraph'i Dağıtma -Subgraph Studio panelinizde subgraph'iniz oluşturulduktan sonra onu `graph deploy` CLI komutunu kullanarak dağıtabilirsiniz. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## Arweave Subgraph'ını Sorgulama -Arweave subgraph'leri için GraphQL endpoint'i, mevcut API arayüzüyle şema tanımına göre belirlenir. Daha fazla bilgi için [GraphQL API dokümantasyonuna](/subgraphs/querying/graphql-api/) göz atın. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Örnek Subgraph'ler -İşte referans olması için örnek bir subgraph: +Here is an example Subgraph for reference: -- [Arweave için örnek subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### Bir subgraph Arweave ve diğer zincirleri indeksleyebilir mi? +### Can a Subgraph index Arweave and other chains? -Hayır, bir subgraph yalnızca bir zincirden/ağdan veri kaynaklarını destekleyebilir. +No, a Subgraph can only support data sources from one chain/network. ### Depolanmış dosyaları Arweave üzerinde indeksleyebilir miyim? Şu anda Graph, Arweave'yi yalnızca bir blok zinciri (blokları ve işlemleri) olarak indekslemektedir. -### Subgraph'ımdaki Bundlr paketlerini tanımlayabilir miyim? +### Can I identify Bundlr bundles in my Subgraph? Bu şu anda desteklenmemektedir. @@ -188,7 +188,7 @@ source.owner kullanıcının genel anahtarı veya hesap adresi olabilir. ### Mevcut şifreleme formatı nedir? -Veriler genellikle Bytes olarak eşleştirmelere aktarılır ve doğrudan kaydedilirse subgraph'te hex formatında (ör. blok ve işlem hash'leri) döner. [Arweave Explorer](https://viewblock.io/arweave/) gibi blok gezginlerinde görüntülenenlerle denkleştirmek için eşlemelerinizi `base64` veya `base64 URL`-safe biçimine dönüştürmek isteyebilirsiniz. +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). Aşağıdaki `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` yardımcı fonksiyonu kullanılabilir. 
Bu fonksiyon, `graph-ts`'e eklenecektir: From d0e26f5afb25989bf60dc8fe9d6d1806cb274f1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:36 -0500 Subject: [PATCH 0575/1789] New translations arweave.mdx (Ukrainian) --- .../pages/uk/subgraphs/cookbook/arweave.mdx | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/website/src/pages/uk/subgraphs/cookbook/arweave.mdx b/website/src/pages/uk/subgraphs/cookbook/arweave.mdx index 6b54757440a0..0e3d309e5733 100644 --- a/website/src/pages/uk/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -25,12 +25,12 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are To be able to build and deploy Arweave Subgraphs, you need two packages: -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Subgraph's components -There are three components of a subgraph: +There are three components of a Subgraph: ### 1. Manifest - `subgraph.yaml` @@ -40,22 +40,22 @@ Defines the data sources of interest, and how they should be processed. Arweave Here you define which data you want to be able to query after indexing your Subgraph using GraphQL. This is actually similar to a model for an API, where the model defines the structure of a request body. -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Mappings - `mapping.ts` This is the logic that determines how data should be retrieved and stored when someone interacts with the data sources you are listening to. The data gets translated and is stored based off the schema you have listed. 
-During subgraph development there are two key commands: +During Subgraph development there are two key commands: ``` $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## Визначення маніфесту підграфів -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet @@ -99,7 +99,7 @@ Arweave data sources support two types of handlers: ## Визначення схеми -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript Mappings @@ -152,7 +152,7 @@ Writing the mappings of an Arweave Subgraph is very similar to writing the mappi ## Deploying an Arweave Subgraph in Subgraph Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## Querying an Arweave Subgraph -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. 
## Приклади підграфів -Here is an example subgraph for reference: +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### Can a subgraph index Arweave and other chains? +### Can a Subgraph index Arweave and other chains? -No, a subgraph can only support data sources from one chain/network. +No, a Subgraph can only support data sources from one chain/network. ### Can I index the stored files on Arweave? Currently, The Graph is only indexing Arweave as a blockchain (its blocks and transactions). -### Can I identify Bundlr bundles in my subgraph? +### Can I identify Bundlr bundles in my Subgraph? This is not currently supported. @@ -188,7 +188,7 @@ The source.owner can be the user's public key or account address. ### What is the current encryption format? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: From 0a5e6de47dbcac9aadead4f3df61b0a282879150 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:36 -0500 Subject: [PATCH 0576/1789] New translations arweave.mdx (Chinese Simplified) --- .../pages/zh/subgraphs/cookbook/arweave.mdx | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/arweave.mdx b/website/src/pages/zh/subgraphs/cookbook/arweave.mdx index 24eafa5cdebe..61529f56a2d5 100644 --- a/website/src/pages/zh/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: 在 Arweave 上构建子图 --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! 在本指南中,您将学习如何构建和部署子图以索引Arweave区块链。 @@ -25,12 +25,12 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are 为了能够构建和部署 Arweave 子图,您需要两个包: -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. 
`@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## 子图的组成部分 -一个子图有三个组成部分: +There are three components of a Subgraph: ### 1. Manifest - `subgraph.yaml` @@ -40,22 +40,22 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are 在这里,您可以定义在使用 GraphQL 索引子图之后希望能够查询的数据。这实际上类似于 API 的模型,其中模型定义了请求主体的结构。 -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Mappings - `mapping.ts` 这种逻辑决定了当有人与您正在监听的数据源进行交互时,应该如何检索和存储数据。数据将被翻译并根据您列出的模式进行存储。 -在子图开发过程中,有两个关键命令: +During Subgraph development there are two key commands: ``` -$ graph codegen # 从清单中标识的模式文件生成类型 -$ graph build # 从 AssemblyScript 文件生成 Web Assembly,并在 /build 文件夹中准备所有子图文件 +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## 子图清单定义 -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave 数据源引入了一个可选的 source. owner 字段,它是 Arweave 钱包的公钥 @@ -99,7 +99,7 @@ Arweave 数据源支持两种类型的处理程序: ## 模式定义 -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript 映射 @@ -152,7 +152,7 @@ Writing the mappings of an Arweave Subgraph is very similar to writing the mappi ## Deploying an Arweave Subgraph in Subgraph Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. 
```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## 查询 Arweave 子图 -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## 示例子图 -下面是一个子图的例子,以供参考: +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### 子图可以索引 Arweave 和其他链吗? +### Can a Subgraph index Arweave and other chains? -不,子图只能支持来自一个链/网络的数据源。 +No, a Subgraph can only support data sources from one chain/network. ### 我可以索引存储在 Arweave 上的文件吗? 目前,Graph 只是将 Arweave 索引为区块链(它的区块和交易)。 -### 我可以识别我的子图中的 Bundlr 包吗? +### Can I identify Bundlr bundles in my Subgraph? 目前还不支持。 @@ -188,7 +188,7 @@ Source.owner可以是用户的公钥或账户地址。 ### 当前的加密格式是什么? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: From ced0ee4133287dca007e7618c8d8da15525cc5f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:38 -0500 Subject: [PATCH 0577/1789] New translations arweave.mdx (Urdu (Pakistan)) --- .../pages/ur/subgraphs/cookbook/arweave.mdx | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/website/src/pages/ur/subgraphs/cookbook/arweave.mdx b/website/src/pages/ur/subgraphs/cookbook/arweave.mdx index 035056b7f5a6..03be894a8918 100644 --- a/website/src/pages/ur/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: بناۓ گئے سب گرافز آرویو(Arweave) پر --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! اس گائڈ میں، آپ سیکھیں گے کہ آرویو(Arweave) بلاکچین کو انڈیکس کرنے کیلئے سب گرافز بنانے اور مستعمل کرنے کا طریقہ کار کیسے ہے۔ @@ -25,12 +25,12 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are آرویو کے سب گراف بنانے اور تعینات کرنے کے لئے،آپ کو دو پیکجوں کی ضرورت ہے: -1. 
`@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## سب گراف کے حصے -سب گراف کے تین حصے ہیں: +There are three components of a Subgraph: ### 1. Manifest - `subgraph.yaml` @@ -40,22 +40,22 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are یہاں آپ بیان کرتے ہیں کے کونسا ڈیٹا آپ کے سب گراف کا کیوری گراف کیو ایل کا استعمال کرتے ہوۓ کر سکے۔یہ دراصل اے پی آی(API) کے ماڈل سے ملتا ہے،جہاں ماڈل درخواست کے جسم کے ڈھانچے کو بیان کرتا ہے. -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Mappings - `mapping.ts` یہ وہ منطق جو اس بات کا پتہ لگاتا ہے کے کیسے ڈیٹا کو بازیافت اور مہفوظ کیا جاۓ جب کوئ اس ڈیٹا کے ذخیرہ سے تعامل کرے جسے آپ سن رہے ہیں۔اس ڈیٹا کا ترجمہ کیا جاتا ہے اور آپ کے درج کردہ اسکیما کی بنیاد پر مہفوظ کیا جاتا ہے. -سب گراف کی ترقی کے دوران دو اہم کمانڈز ہیں: +During Subgraph development there are two key commands: ``` -$graph codegen # ظاہر میں شناخت کردہ اسکیما فائل سے اقسام تیار کرتا ہے۔ -$graph build # اسمبلی سکرپٹ فائلوں سے ویب اسمبلی تیار کرتا ہے، اور تمام ذیلی گراف فائلوں کو /build فولڈر میں تیار کرتا ہے۔ +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## سب گراف مینی فیسٹ کی تعریف -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - آرویو ڈیٹا کے ذرائع ایک اختیاری source.owner فیلڈ متعارف کراتے ہیں، جو آرویو والیٹ کی عوامی کلید ہے @@ -99,7 +99,7 @@ dataSources: ## اسکیما کی تعریف -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. 
There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## اسمبلی اسکرپٹ سب میپنک @@ -152,7 +152,7 @@ Writing the mappings of an Arweave Subgraph is very similar to writing the mappi ## Deploying an Arweave Subgraph in Subgraph Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## آرویو سب گراف سے کیوری کرنا -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## سب گراف کی مثال -حوالہ کے لیے سب گراف کی ایک مثال یہ ہے: +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### کیا ایک سب گراف آرویو اور دیگر چینس کو انڈیکس کر سکتا ہے؟ +### Can a Subgraph index Arweave and other chains? -نہیں، ایک سب گراف صرف ایک چین/نیٹ ورک سے ڈیٹا کے ذرائع کو سپورٹ کر سکتا ہے. +No, a Subgraph can only support data sources from one chain/network. ### کیا میں آرویو پر ذخیرہ شدہ فائلوں کو انڈیکس کر سکتا ہوں؟ فی الحال، دی گراف صرف آرویو کو بلاک چین (اس کے بلاکس اور لین دین) کے طور پر ترتیب دے رہا ہے. -### کیا میں اپنے سب گراف میں Bundlr کے بنڈلوں کی شناخت کر سکتا ہوں؟ +### Can I identify Bundlr bundles in my Subgraph? یہ فی الحال سپورٹڈ نہیں ہے. @@ -188,7 +188,7 @@ Source.owner صارف کی عوامی کلید یا اکاؤنٹ ایڈریس ہ ### موجودہ خفیہ کاری کا فارمیٹ کیا ہے؟ -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). 
The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: From 2f9f1d82df00094d269b11ff678abba2a89f03f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:39 -0500 Subject: [PATCH 0578/1789] New translations arweave.mdx (Vietnamese) --- .../pages/vi/subgraphs/cookbook/arweave.mdx | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/website/src/pages/vi/subgraphs/cookbook/arweave.mdx b/website/src/pages/vi/subgraphs/cookbook/arweave.mdx index 2372025621d1..18b485a9c382 100644 --- a/website/src/pages/vi/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -25,12 +25,12 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are To be able to build and deploy Arweave Subgraphs, you need two packages: -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Subgraph's components -There are three components of a subgraph: +There are three components of a Subgraph: ### 1. Manifest - `subgraph.yaml` @@ -40,22 +40,22 @@ Defines the data sources of interest, and how they should be processed. Arweave Here you define which data you want to be able to query after indexing your Subgraph using GraphQL. This is actually similar to a model for an API, where the model defines the structure of a request body. -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Mappings - `mapping.ts` This is the logic that determines how data should be retrieved and stored when someone interacts with the data sources you are listening to. The data gets translated and is stored based off the schema you have listed. 
-During subgraph development there are two key commands: +During Subgraph development there are two key commands: ``` $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## Subgraph Manifest Definition -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet @@ -99,7 +99,7 @@ Arweave data sources support two types of handlers: ## Schema Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript Mappings @@ -152,7 +152,7 @@ Writing the mappings of an Arweave Subgraph is very similar to writing the mappi ## Deploying an Arweave Subgraph in Subgraph Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## Querying an Arweave Subgraph -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Example Subgraphs -Here is an example subgraph for reference: +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### Can a subgraph index Arweave and other chains? 
+### Can a Subgraph index Arweave and other chains? -No, a subgraph can only support data sources from one chain/network. +No, a Subgraph can only support data sources from one chain/network. ### Can I index the stored files on Arweave? Currently, The Graph is only indexing Arweave as a blockchain (its blocks and transactions). -### Can I identify Bundlr bundles in my subgraph? +### Can I identify Bundlr bundles in my Subgraph? This is not currently supported. @@ -188,7 +188,7 @@ The source.owner can be the user's public key or account address. ### What is the current encryption format? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: From 7c6b68c78112fe9d4f61e6edfd72929e72e448d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:40 -0500 Subject: [PATCH 0579/1789] New translations arweave.mdx (Marathi) --- .../pages/mr/subgraphs/cookbook/arweave.mdx | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/website/src/pages/mr/subgraphs/cookbook/arweave.mdx b/website/src/pages/mr/subgraphs/cookbook/arweave.mdx index 2b43324539b9..05f1c24683d2 100644 --- a/website/src/pages/mr/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Arweave वर सबग्राफ तयार करणे --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! या मार्गदर्शकामध्ये, तुम्ही Arweave ब्लॉकचेन इंडेक्स करण्यासाठी सबग्राफ कसे तयार करावे आणि कसे तैनात करावे ते शिकाल. @@ -25,12 +25,12 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are Arweave Subgraphs तयार आणि तैनात करण्यात सक्षम होण्यासाठी, तुम्हाला दोन पॅकेजेसची आवश्यकता आहे: -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. 
[Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## सबग्राफचे घटक -सबग्राफचे तीन घटक आहेत: +There are three components of a Subgraph: ### 1. Manifest - `subgraph.yaml` @@ -40,22 +40,22 @@ Arweave Subgraphs तयार आणि तैनात करण्यात GraphQL वापरून तुमचा सबग्राफ इंडेक्स केल्यानंतर तुम्ही कोणता डेटा क्वेरी करू इच्छिता ते येथे तुम्ही परिभाषित करता. हे प्रत्यक्षात API च्या मॉडेलसारखेच आहे, जेथे मॉडेल विनंती मुख्य भागाची रचना परिभाषित करते. -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Mappings - `mapping.ts` जेव्हा तुम्ही ऐकत असलेल्या डेटा स्रोतांशी कोणीतरी संवाद साधते तेव्हा डेटा कसा पुनर्प्राप्त आणि संग्रहित केला जावा हे हे तर्कशास्त्र आहे. डेटा अनुवादित केला जातो आणि तुम्ही सूचीबद्ध केलेल्या स्कीमावर आधारित संग्रहित केला जातो. -सबग्राफ विकासादरम्यान दोन प्रमुख आज्ञा आहेत: +During Subgraph development there are two key commands: ``` -$ graph codegen # मॅनिफेस्टमध्ये ओळखल्या गेलेल्या स्कीमा फाइलमधून प्रकार व्युत्पन्न करते -$ graph build # असेंबलीस्क्रिप्ट फायलींमधून वेब असेंब्ली तयार करते आणि /बिल्ड फोल्डरमध्ये सर्व सबग्राफ फाइल्स तयार करते +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## सबग्राफ मॅनिफेस्ट व्याख्या -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave डेटा स्रोत पर्यायी source.owner फील्ड सादर करतात, जी Arweave वॉलेटची सार्वजनिक की आहे @@ -99,7 +99,7 @@ Arweave डेटा स्रोत दोन प्रकारच्या ## स्कीमा व्याख्या -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## असेंबलीस्क्रिप्ट मॅपिंग @@ -152,7 +152,7 @@ Writing the mappings of an Arweave Subgraph is very similar to writing the mappi ## Deploying an Arweave Subgraph in Subgraph Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. 
+Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## प्रश्न करत आहे Arweave सबग्राफ -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## उदाहरणे सबग्राफ -संदर्भासाठी येथे एक उदाहरण उपग्राफ आहे: +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### सबग्राफ इंडेक्स Arweave आणि इतर साखळी करू शकता? +### Can a Subgraph index Arweave and other chains? -नाही, सबग्राफ केवळ एका साखळी/नेटवर्कमधील डेटा स्रोतांना समर्थन देऊ शकतो. +No, a Subgraph can only support data sources from one chain/network. ### मी Arweave वर संग्रहित फाइल्स अनुक्रमित करू शकतो? सध्या, ग्राफ फक्त ब्लॉकचेन (त्याचे ब्लॉक्स आणि व्यवहार) म्हणून Arweave अनुक्रमित करत आहे. -### Currently, The Graph फक्त blockchain (त्याचे blocks आणि transactions) म्हणून Arweave अनुक्रमित करत आहे? +### Can I identify Bundlr bundles in my Subgraph? हे सध्या समर्थित नाही. @@ -188,7 +188,7 @@ source.owner वापरकर्त्याची सार्वजनिक ### सध्याचे एन्क्रिप्शन स्वरूप काय आहे? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: From 8cd11bca81242d6e5e599def8910daf06d33e7c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:40 -0500 Subject: [PATCH 0580/1789] New translations arweave.mdx (Hindi) --- .../pages/hi/subgraphs/cookbook/arweave.mdx | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/website/src/pages/hi/subgraphs/cookbook/arweave.mdx b/website/src/pages/hi/subgraphs/cookbook/arweave.mdx index b51d9a5405bc..fe6929503bf9 100644 --- a/website/src/pages/hi/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/hi/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: आरवीव पर सब-ग्राफ्र्स बनाना --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! 
+> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! इस गाइड में आप आरवीव ब्लॉकचेन पर सब ग्राफ्स बनाना और डेप्लॉय करना सीखेंगे! @@ -25,12 +25,12 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are आरवीवे पर सब ग्राफ बनाने के लिए हमे दो पैकेजेस की जरूरत है: -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## सब ग्राफ के कॉम्पोनेन्ट -सब ग्राफ के तीन कॉम्पोनेन्ट होते हैं: +There are three components of a Subgraph: ### 1. Manifest - `subgraph.yaml` @@ -40,22 +40,22 @@ The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are यहाँ आप बताते हैं की आप कौन सा डाटा इंडेक्सिंग के बाद क्वेरी करना चाहते हैं| दरसअल यह एक API के मॉडल जैसा है, जहाँ मॉडल द्वारा रिक्वेस्ट बॉडी का स्ट्रक्चर परिभाषित किया जाता है| -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Mappings - `mapping.ts` यह किसी के द्वारा इस्तेमाल किये जा रहे डाटा सोर्स से डाटा को पुनः प्राप्त करने और स्टोर करने के लॉजिक को बताता है| डाटा अनुवादित होकर आपके द्वारा सूचीबद्ध स्कीमा के अनुसार स्टोर हो जाता है| -सब ग्राफ को बनाते वक़्त दो मुख्य कमांड हैं: +During Subgraph development there are two key commands: ``` $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ## सब ग्राफ मैनिफेस्ट की परिभाषा -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: ```yaml specVersion: 0.0.5 @@ -82,7 +82,7 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) +- Arweave Subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. 
In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - अरवीव डाटा सोर्स द्वारा एक वैकल्पिक source.owner फील्ड लाया गया, जो की एक आरवीव वॉलेट का मपब्लिक key है| @@ -99,7 +99,7 @@ dataSources: ## स्कीमा की परिभाषा -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## असेंबली स्क्रिप्ट मैप्पिंग्स @@ -152,7 +152,7 @@ Writing the mappings of an Arweave Subgraph is very similar to writing the mappi ## Deploying an Arweave Subgraph in Subgraph Studio -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,25 +160,25 @@ graph deploy --access-token ## आरवीव सब-ग्राफ क्वेरी करना -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## सब-ग्राफ के उदाहरण -सहायता के एक सब-ग्राफ का उदाहरण +Here is an example Subgraph for reference: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### क्या एक सब-ग्राफ आरवीव और बाकी चेन्स को इंडेक्स कर सकता है? +### Can a Subgraph index Arweave and other chains? -नहीं, एक सब-ग्राफ केवल एक चेन/नेटवर्क से डाटा सोर्स को सपोर्ट कर सकता है +No, a Subgraph can only support data sources from one chain/network. ### क्या मैं आरवीव पर स्टोर की फाइल्स को इंडेक्स कर सकता हूँ? वर्तमान में द ग्राफ आरवीव को केवल एक ब्लॉकचेन की तरह इंडेक्स करता है (उसके ब्लॉक्स और ट्रांसक्शन्स)| -### क्या मैं अपने सब-ग्राफ में Bundlr बंडल्स को पहचान सकता हूँ? +### Can I identify Bundlr bundles in my Subgraph? यह वर्तमान में सपोर्टेड नहीं है| @@ -188,7 +188,7 @@ The GraphQL endpoint for Arweave subgraphs is determined by the schema definitio ### वर्तमान एन्क्रिप्शन फॉर्मेट क्या है? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). 
You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: From 07cb865e2f12793a8107be66df1403380c045d8c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:41 -0500 Subject: [PATCH 0581/1789] New translations arweave.mdx (Swahili) --- .../pages/sw/subgraphs/cookbook/arweave.mdx | 239 ++++++++++++++++++ 1 file changed, 239 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/cookbook/arweave.mdx diff --git a/website/src/pages/sw/subgraphs/cookbook/arweave.mdx b/website/src/pages/sw/subgraphs/cookbook/arweave.mdx new file mode 100644 index 000000000000..18b485a9c382 --- /dev/null +++ b/website/src/pages/sw/subgraphs/cookbook/arweave.mdx @@ -0,0 +1,239 @@ +--- +title: Building Subgraphs on Arweave +--- + +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave Subgraphs! + +In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. + +## What is Arweave? + +The Arweave protocol allows developers to store data permanently and that is the main difference between Arweave and IPFS, where IPFS lacks the feature; permanence, and files stored on Arweave can't be changed or deleted. + +Arweave already has built numerous libraries for integrating the protocol in a number of different programming languages. For more information you can check: + +- [Arwiki](https://arwiki.wiki/#/en/main) +- [Arweave Resources](https://www.arweave.org/build) + +## What are Arweave Subgraphs? + +The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are used to tell indexers (server operators) which data to index on a blockchain and save on their servers in order for you to be able to query it at any time using [GraphQL](https://graphql.org/). + +[Graph Node](https://github.com/graphprotocol/graph-node) is now able to index data on Arweave protocol. The current integration is only indexing Arweave as a blockchain (blocks and transactions), it is not indexing the stored files yet. + +## Building an Arweave Subgraph + +To be able to build and deploy Arweave Subgraphs, you need two packages: + +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying Subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of Subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. + +## Subgraph's components + +There are three components of a Subgraph: + +### 1. Manifest - `subgraph.yaml` + +Defines the data sources of interest, and how they should be processed. Arweave is a new kind of data source. + +### 2. Schema - `schema.graphql` + +Here you define which data you want to be able to query after indexing your Subgraph using GraphQL. This is actually similar to a model for an API, where the model defines the structure of a request body. + +The requirements for Arweave Subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). + +### 3. 
AssemblyScript Mappings - `mapping.ts` + +This is the logic that determines how data should be retrieved and stored when someone interacts with the data sources you are listening to. The data gets translated and is stored based off the schema you have listed. + +During Subgraph development there are two key commands: + +``` +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder +``` + +## Subgraph Manifest Definition + +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for an Arweave Subgraph: + +```yaml +specVersion: 0.0.5 +description: Arweave Blocks Indexing +schema: + file: ./schema.graphql # link to the schema file +dataSources: + - kind: arweave + name: arweave-blocks + network: arweave-mainnet # The Graph only supports Arweave Mainnet + source: + owner: 'ID-OF-AN-OWNER' # The public key of an Arweave wallet + startBlock: 0 # set this to 0 to start indexing from chain genesis + mapping: + apiVersion: 0.0.5 + language: wasm/assemblyscript + file: ./src/blocks.ts # link to the file with the Assemblyscript mappings + entities: + - Block + - Transaction + blockHandlers: + - handler: handleBlock # the function name in the mapping file + transactionHandlers: + - handler: handleTx # the function name in the mapping file +``` + +- Arweave Subgraphs introduce a new kind of data source (`arweave`) +- The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` +- Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet + +Arweave data sources support two types of handlers: + +- `blockHandlers` - Run on every new Arweave block. No source.owner is required. +- `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` + +> The source.owner can be the owner's address, or their Public Key. +> +> Transactions are the building blocks of the Arweave permaweb and they are objects created by end-users. +> +> Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. + +## Schema Definition + +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). + +## AssemblyScript Mappings + +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). + +Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). 
+ +```tsx +class Block { + timestamp: u64 + lastRetarget: u64 + height: u64 + indepHash: Bytes + nonce: Bytes + previousBlock: Bytes + diff: Bytes + hash: Bytes + txRoot: Bytes + txs: Bytes[] + walletList: Bytes + rewardAddr: Bytes + tags: Tag[] + rewardPool: Bytes + weaveSize: Bytes + blockSize: Bytes + cumulativeDiff: Bytes + hashListMerkle: Bytes + poa: ProofOfAccess +} + +class Transaction { + format: u32 + id: Bytes + lastTx: Bytes + owner: Bytes + tags: Tag[] + target: Bytes + quantity: Bytes + data: Bytes + dataSize: Bytes + dataRoot: Bytes + signature: Bytes + reward: Bytes +} +``` + +Block handlers receive a `Block`, while transactions receive a `Transaction`. + +Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). + +## Deploying an Arweave Subgraph in Subgraph Studio + +Once your Subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. + +```bash +graph deploy --access-token +``` + +## Querying an Arweave Subgraph + +The GraphQL endpoint for Arweave Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. + +## Example Subgraphs + +Here is an example Subgraph for reference: + +- [Example Subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) + +## FAQ + +### Can a Subgraph index Arweave and other chains? + +No, a Subgraph can only support data sources from one chain/network. + +### Can I index the stored files on Arweave? + +Currently, The Graph is only indexing Arweave as a blockchain (its blocks and transactions). + +### Can I identify Bundlr bundles in my Subgraph? + +This is not currently supported. + +### How can I filter transactions to a specific account? + +The source.owner can be the user's public key or account address. + +### What is the current encryption format? + +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the Subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). + +The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: + +``` +const base64Alphabet = [ + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" +]; + +const base64UrlAlphabet = [ + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" +]; + +function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { + let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; +} +``` From c9e0d6d4b81becdd35c06da4fe4d4d432e1e7ec9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:42 -0500 Subject: [PATCH 0582/1789] New translations enums.mdx (Romanian) --- website/src/pages/ro/subgraphs/cookbook/enums.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ro/subgraphs/cookbook/enums.mdx b/website/src/pages/ro/subgraphs/cookbook/enums.mdx index a10970c1539f..9f55ae07c54b 100644 --- a/website/src/pages/ro/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/ro/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, or enumeration types, are a specific data type that allows you to define ### Example of Enums in Your Schema -If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. @@ -65,7 +65,7 @@ Enums provide type safety, minimize typo risks, and ensure consistent and reliab > Note: The following guide uses the CryptoCoven NFT smart contract. -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Using Enums for NFT Marketplaces -Once defined, enums can be used throughout your subgraph to categorize transactions or events. +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. 
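The enums guide patched above declares the `Marketplace` enum and notes that sales can record the marketplace via the enum, but the excerpted hunks stop before the mapping code that assigns it. As a minimal sketch only, assuming a generated `Sale` entity and a Seaport `OrderFulfilled` event (neither is defined in these diffs), the enum field is set by assigning the string form of one of the declared values:

```typescript
// Hypothetical AssemblyScript mapping: the event class, entity name, and
// generated import paths are illustrative assumptions, not from the diffs above.
import { OrderFulfilled } from '../generated/Seaport/Seaport'
import { Sale } from '../generated/schema'

export function handleOrderFulfilled(event: OrderFulfilled): void {
  // Transaction hash plus log index gives a unique entity id
  let id = event.transaction.hash.toHexString() + '-' + event.logIndex.toString()
  let sale = new Sale(id)

  // Enum fields are persisted as strings; the value should match one of the
  // members declared in the schema's Marketplace enum
  sale.marketplace = 'SeaPort'
  sale.save()
}
```

A string that is not a declared enum member should be rejected when the entity is saved, which is the type-safety benefit the guide describes.
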
From c3a479a8558cf16a9aa511c3a4ca8084482d3715 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:43 -0500 Subject: [PATCH 0583/1789] New translations enums.mdx (French) --- website/src/pages/fr/subgraphs/cookbook/enums.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/enums.mdx b/website/src/pages/fr/subgraphs/cookbook/enums.mdx index 5784cb991330..a0a6b93d75b9 100644 --- a/website/src/pages/fr/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Les Enums, ou types d'énumération, sont un type de données spécifique qui vo ### Exemple d'Enums dans Votre Schéma -Si vous construisez un subgraph pour suivre l'historique de propriété des tokens sur une marketplace, chaque token peut passer par différentes propriétés, telles que`OriginalOwner`, `SecondOwner`, et `ThirdOwner`. En utilisant des enums, vous pouvez définir ces propriétés spécifiques, garantissant que seules des valeurs prédéfinies sont utilisées. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. Vous pouvez définir des enums dans votre schéma et, une fois définis, vous pouvez utiliser la représentation en chaîne de caractères des valeurs enum pour définir un champ enum sur une entité. @@ -65,7 +65,7 @@ Les Enums assurent la sécurité des types, minimisent les risques de fautes de > Note: Le guide suivant utilise le smart contract CryptoCoven NFT. -Pour définir des enums pour les différents marketplaces où les NFTs sont échangés, utilisez ce qui suit dans votre schéma de subgraph : +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum pour les Marketplaces avec lesquelles le contrat CryptoCoven a interagi (probablement une vente ou un mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Utilisation des Enums pour les Marketplaces NFT -Une fois définis, les enums peuvent être utilisés tout au long de votre subgraph pour catégoriser les transactions ou les événements. +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. Par exemple, lors de la journalisation des ventes de NFT, vous pouvez spécifier la marketplace impliqué dans la transaction en utilisant l'enum. From 17df88f8d11c449990ab6406c8f3898e25aa677c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:44 -0500 Subject: [PATCH 0584/1789] New translations enums.mdx (Spanish) --- website/src/pages/es/subgraphs/cookbook/enums.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/es/subgraphs/cookbook/enums.mdx b/website/src/pages/es/subgraphs/cookbook/enums.mdx index 29b5b2d0bf38..8a3da763d6e2 100644 --- a/website/src/pages/es/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/es/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, or enumeration types, are a specific data type that allows you to define ### Example of Enums in Your Schema -If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. 
By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. @@ -65,7 +65,7 @@ Enums provide type safety, minimize typo risks, and ensure consistent and reliab > Note: The following guide uses the CryptoCoven NFT smart contract. -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Using Enums for NFT Marketplaces -Once defined, enums can be used throughout your subgraph to categorize transactions or events. +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. From 21ac43f653868c75665f92ed380892dce343bafb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:46 -0500 Subject: [PATCH 0585/1789] New translations enums.mdx (Arabic) --- website/src/pages/ar/subgraphs/cookbook/enums.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ar/subgraphs/cookbook/enums.mdx b/website/src/pages/ar/subgraphs/cookbook/enums.mdx index 9508aa864b6c..846faecc1706 100644 --- a/website/src/pages/ar/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, or enumeration types, are a specific data type that allows you to define ### Example of Enums in Your Schema -If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. @@ -65,7 +65,7 @@ Enums provide type safety, minimize typo risks, and ensure consistent and reliab > Note: The following guide uses the CryptoCoven NFT smart contract. -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Using Enums for NFT Marketplaces -Once defined, enums can be used throughout your subgraph to categorize transactions or events. 
+Once defined, enums can be used throughout your Subgraph to categorize transactions or events. For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. From ad9b3630397c70fb84e01458c060bf32adf683b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:46 -0500 Subject: [PATCH 0586/1789] New translations enums.mdx (Czech) --- website/src/pages/cs/subgraphs/cookbook/enums.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/enums.mdx b/website/src/pages/cs/subgraphs/cookbook/enums.mdx index 71f3f784a0eb..7cc0e6c0ed78 100644 --- a/website/src/pages/cs/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, or enumeration types, are a specific data type that allows you to define ### Example of Enums in Your Schema -If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. @@ -65,7 +65,7 @@ Enums provide type safety, minimize typo risks, and ensure consistent and reliab > Note: The following guide uses the CryptoCoven NFT smart contract. -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Using Enums for NFT Marketplaces -Once defined, enums can be used throughout your subgraph to categorize transactions or events. +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. From a49773a042b08e6baa170d4a387fc2ba0fd2f9af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:47 -0500 Subject: [PATCH 0587/1789] New translations enums.mdx (German) --- website/src/pages/de/subgraphs/cookbook/enums.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/de/subgraphs/cookbook/enums.mdx b/website/src/pages/de/subgraphs/cookbook/enums.mdx index 0b2fe58b4e34..911f6f54a340 100644 --- a/website/src/pages/de/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/de/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, or enumeration types, are a specific data type that allows you to define ### Example of Enums in Your Schema -If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. 
By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. @@ -65,7 +65,7 @@ Enums provide type safety, minimize typo risks, and ensure consistent and reliab > Note: The following guide uses the CryptoCoven NFT smart contract. -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Using Enums for NFT Marketplaces -Once defined, enums can be used throughout your subgraph to categorize transactions or events. +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. From 25968be182e78fe407933152e64f0c9bcc62c697 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:48 -0500 Subject: [PATCH 0588/1789] New translations enums.mdx (Italian) --- website/src/pages/it/subgraphs/cookbook/enums.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/it/subgraphs/cookbook/enums.mdx b/website/src/pages/it/subgraphs/cookbook/enums.mdx index a10970c1539f..9f55ae07c54b 100644 --- a/website/src/pages/it/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/it/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, or enumeration types, are a specific data type that allows you to define ### Example of Enums in Your Schema -If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. @@ -65,7 +65,7 @@ Enums provide type safety, minimize typo risks, and ensure consistent and reliab > Note: The following guide uses the CryptoCoven NFT smart contract. -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Using Enums for NFT Marketplaces -Once defined, enums can be used throughout your subgraph to categorize transactions or events. 
+Once defined, enums can be used throughout your Subgraph to categorize transactions or events. For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. From be1cf3f8437f5b809ecb626381707faed4c75f97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:49 -0500 Subject: [PATCH 0589/1789] New translations enums.mdx (Japanese) --- website/src/pages/ja/subgraphs/cookbook/enums.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ja/subgraphs/cookbook/enums.mdx b/website/src/pages/ja/subgraphs/cookbook/enums.mdx index 8df21d2960f9..14c608584b8f 100644 --- a/website/src/pages/ja/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, or enumeration types, are a specific data type that allows you to define ### Example of Enums in Your Schema -If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. @@ -65,7 +65,7 @@ Enums provide type safety, minimize typo risks, and ensure consistent and reliab > Note: The following guide uses the CryptoCoven NFT smart contract. -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Using Enums for NFT Marketplaces -Once defined, enums can be used throughout your subgraph to categorize transactions or events. +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. From 824bcc778543ca0e6f29eabde5bfb1095db4fbb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:50 -0500 Subject: [PATCH 0590/1789] New translations enums.mdx (Korean) --- website/src/pages/ko/subgraphs/cookbook/enums.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ko/subgraphs/cookbook/enums.mdx b/website/src/pages/ko/subgraphs/cookbook/enums.mdx index a10970c1539f..9f55ae07c54b 100644 --- a/website/src/pages/ko/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/ko/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, or enumeration types, are a specific data type that allows you to define ### Example of Enums in Your Schema -If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. 
By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. @@ -65,7 +65,7 @@ Enums provide type safety, minimize typo risks, and ensure consistent and reliab > Note: The following guide uses the CryptoCoven NFT smart contract. -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Using Enums for NFT Marketplaces -Once defined, enums can be used throughout your subgraph to categorize transactions or events. +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. From c33808089856862d9ca253d580964a1c4736c5b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:51 -0500 Subject: [PATCH 0591/1789] New translations enums.mdx (Dutch) --- website/src/pages/nl/subgraphs/cookbook/enums.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/nl/subgraphs/cookbook/enums.mdx b/website/src/pages/nl/subgraphs/cookbook/enums.mdx index a10970c1539f..9f55ae07c54b 100644 --- a/website/src/pages/nl/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/nl/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, or enumeration types, are a specific data type that allows you to define ### Example of Enums in Your Schema -If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. @@ -65,7 +65,7 @@ Enums provide type safety, minimize typo risks, and ensure consistent and reliab > Note: The following guide uses the CryptoCoven NFT smart contract. -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Using Enums for NFT Marketplaces -Once defined, enums can be used throughout your subgraph to categorize transactions or events. 
+Once defined, enums can be used throughout your Subgraph to categorize transactions or events. For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. From 306e73ddb0af07b568670f827b3ea41a81363c8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:52 -0500 Subject: [PATCH 0592/1789] New translations enums.mdx (Polish) --- website/src/pages/pl/subgraphs/cookbook/enums.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/pl/subgraphs/cookbook/enums.mdx b/website/src/pages/pl/subgraphs/cookbook/enums.mdx index a10970c1539f..9f55ae07c54b 100644 --- a/website/src/pages/pl/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/pl/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, or enumeration types, are a specific data type that allows you to define ### Example of Enums in Your Schema -If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. @@ -65,7 +65,7 @@ Enums provide type safety, minimize typo risks, and ensure consistent and reliab > Note: The following guide uses the CryptoCoven NFT smart contract. -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Using Enums for NFT Marketplaces -Once defined, enums can be used throughout your subgraph to categorize transactions or events. +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. From 2fd2a9647d8b51c3713eec43386f836d83bf2285 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:53 -0500 Subject: [PATCH 0593/1789] New translations enums.mdx (Portuguese) --- website/src/pages/pt/subgraphs/cookbook/enums.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/enums.mdx b/website/src/pages/pt/subgraphs/cookbook/enums.mdx index d76ea4c23c4b..d59761dcb9a7 100644 --- a/website/src/pages/pt/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, ou tipos de enumeração, são um tipo de dados específico que permite d ### Exemplo de Enums no seu Schema -Se estiver a construir um subgraph para rastrear o histórico de posse de tokens em um marketplace, cada token pode passar por posses diferentes, como `OriginalOwner`, `SecondOwner`, e `ThirdOwner`. 
Ao usar enums, é possível definir essas posses específicas, assim garantindo que só são nomeados valores predefinidos. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. É possível definir enums no seu schema; assim definidos, a representação de string dos valores de enum podem ser usados para configurar um campo de enum numa entidade. @@ -65,7 +65,7 @@ Enums provém segurança de dados, minimizam os riscos de erros de digitação, > Nota: o guia a seguir usa o contrato inteligente de NFTs CryptoCoven. -Para definir enums para os vários marketplaces com apoio a troca de NFTs, use o seguinte no seu schema de subgraph: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum para Marketplaces com que o contrato CryptoCoven interagiu(provavelmente Troca/Mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Como Usar Enums para Marketplaces de NFT -Quando definidos, enums podem ser usados no seu subgraph para categorizar transações ou eventos. +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. Por exemplo: ao registrar vendas de NFT, é possível usar o enum para especificar o marketplace envolvido na ação. From 1b22834f44399e8aa5e1b02a0965a92bb8696a39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:54 -0500 Subject: [PATCH 0594/1789] New translations enums.mdx (Russian) --- website/src/pages/ru/subgraphs/cookbook/enums.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/enums.mdx b/website/src/pages/ru/subgraphs/cookbook/enums.mdx index 204b35851fc3..65f7091b08ae 100644 --- a/website/src/pages/ru/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ title: Категоризация маркетплейсов NFT с исполь ### Пример использования Enums (перечислений) в Вашей схеме -Если вы создаете субграф для отслеживания истории владения токенами на рынке, каждый токен может переходить через разные стадии владения, такие как `OriginalOwner` (Первоначальный Владелец), `SecondOwner` (Второй Владелец) и `ThirdOwner` (Третий Владелец). Используя перечисления (enums), Вы можете определить эти конкретные стадии владения, обеспечивая присвоение только заранее определенных значений. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. Вы можете определить перечисления (enums) в своей схеме, и после их определения Вы можете использовать строковое представление значений перечислений для установки значения поля перечисления в объекты. @@ -65,14 +65,14 @@ type Token @entity { > Примечание: Следующее руководство использует смарт-контракт NFT CryptoCoven. 
-Чтобы определить перечисления для различных маркетплейсов, на которых торгуются NFT, используйте следующее в своей схеме субграфа: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Перечисление для маркетплейсов, с которыми взаимодействовал смарт-контракт CryptoCoven (вероятно, торговля или минт) enum Marketplace { OpenSeaV1 # Представляет случай, когда NFT CryptoCoven торгуется на маркетплейсе OpenSeaV1 OpenSeaV2 # Представляет случай, когда NFT CryptoCoven торгуется на маркетплейсе OpenSeaV2 - SeaPort # Представляет случай, когда NFT CryptoCoven торгуется на маркетплейсе SeaPort + SeaPort # Представляет случай, когда NFT CryptoCoven торгуется на маркетплейсе SeaPort LooksRare # Представляет случай, когда NFT CryptoCoven торгуется на маркетплейсе LooksRare # ...и другие рынки } @@ -80,7 +80,7 @@ enum Marketplace { ## Использование перечислений (Enums) для Маркетплейсов NFT -После определения перечисления (enums) могут использоваться в Вашем субграфе для категоризации транзакций или событий. +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. Например, при регистрации продаж NFT можно указать маркетплейс, на котором произошла сделка, используя перечисление. From faa6915d9a473d9e1b6cf01cdc3457e4abe82581 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:55 -0500 Subject: [PATCH 0595/1789] New translations enums.mdx (Swedish) --- website/src/pages/sv/subgraphs/cookbook/enums.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/sv/subgraphs/cookbook/enums.mdx b/website/src/pages/sv/subgraphs/cookbook/enums.mdx index 2fc0efcc5831..3b90caab564e 100644 --- a/website/src/pages/sv/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, or enumeration types, are a specific data type that allows you to define ### Example of Enums in Your Schema -If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. @@ -65,7 +65,7 @@ Enums provide type safety, minimize typo risks, and ensure consistent and reliab > Note: The following guide uses the CryptoCoven NFT smart contract. -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Using Enums for NFT Marketplaces -Once defined, enums can be used throughout your subgraph to categorize transactions or events. +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. 
For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. From ab26de0570b8a9ead147e90246770c9c3bdbe4bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:56 -0500 Subject: [PATCH 0596/1789] New translations enums.mdx (Turkish) --- website/src/pages/tr/subgraphs/cookbook/enums.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/tr/subgraphs/cookbook/enums.mdx b/website/src/pages/tr/subgraphs/cookbook/enums.mdx index 1aaf953bc2e4..8b5ab54177af 100644 --- a/website/src/pages/tr/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/tr/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enum'lar veya numaralandırma türleri, bir dizi izin verilen değeri tanımlama ### Şemanızda Enum Örnekleri -Bir pazar yerinde token sahiplik geçmişini izlemek için bir subgraph oluşturuyorsanız, her token `OriginalOwner`, `SecondOwner` ve `ThirdOwner` gibi farklı sahipliklerden geçebilir. Enum'ları kullanarak, bu belirli sahiplikleri tanımlayabilir ve yalnızca önceden tanımlanmış değerlerin atanmasını sağlayabilirsiniz. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. Şemanızda enum tanımlayabilir ve bir kez tanımlandığında, bir varlık üzerinde bir enum alanı ayarlamak için enum değerlerinin dizi (string) gösterimini kullanabilirsiniz. @@ -65,7 +65,7 @@ Enumlar; tür güvenliği sağlar, yazım hatası riskini en aza indirir ve tuta > Not: Aşağıdaki kılavuz CryptoCoven NFT akıllı sözleşmesini kullanmaktadır. -NFT'lerin ticaretinin yapıldığı çeşitli pazar yerleri için enum tanımlamak için subgraph şemanızda aşağıdakini kullanın: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # CryptoCoven sözleşmesinin etkileşimde bulunduğu pazar yerleri için Enum (muhtemel bir Takas/Basım) @@ -80,7 +80,7 @@ enum Marketplace { ## NFT Pazar Yerleri için Enum Kullanımı -Tanımlandıktan sonra, enum'lar işlemleri veya olayları kategorize etmek için subgraph'inizde kullanılabilir. +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. Örneğin, NFT satışlarını kaydederken takasta yer alan pazar yerini enum kullanarak belirleyebilirsiniz. 
@@ -92,7 +92,7 @@ Enum'dan pazar yeri adını bir dize olarak almak için bir fonksiyonu şöyle u export function getMarketplaceName(marketplace: Marketplace): string { // Enum değerini bir dizeye eşlemek için if-else ifadelerini kullanma if (marketplace === Marketplace.OpenSeaV1) { - return 'OpenSeaV1' // I Eğer pazar yeri OpenSea ise, onun dize temsilini döndür + return 'OpenSeaV1' // I Eğer pazar yeri OpenSea ise, onun dize temsilini döndür } else if (marketplace === Marketplace.OpenSeaV2) { return 'OpenSeaV2' } else if (marketplace === Marketplace.SeaPort) { From 44b34c483920950ba6a7b89de1f68dd936095a50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:57 -0500 Subject: [PATCH 0597/1789] New translations enums.mdx (Ukrainian) --- website/src/pages/uk/subgraphs/cookbook/enums.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/uk/subgraphs/cookbook/enums.mdx b/website/src/pages/uk/subgraphs/cookbook/enums.mdx index 4fa07dc05765..195d3bb7ee84 100644 --- a/website/src/pages/uk/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, or enumeration types, are a specific data type that allows you to define ### Example of Enums in Your Schema -If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. @@ -65,7 +65,7 @@ Enums provide type safety, minimize typo risks, and ensure consistent and reliab > Note: The following guide uses the CryptoCoven NFT smart contract. -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Using Enums for NFT Marketplaces -Once defined, enums can be used throughout your subgraph to categorize transactions or events. +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. 
From 13d7db0d7f8e832043b7297361ba61d468ebc7e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:58 -0500 Subject: [PATCH 0598/1789] New translations enums.mdx (Chinese Simplified) --- website/src/pages/zh/subgraphs/cookbook/enums.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/enums.mdx b/website/src/pages/zh/subgraphs/cookbook/enums.mdx index 43d1ad4cc0c2..9f55ae07c54b 100644 --- a/website/src/pages/zh/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, or enumeration types, are a specific data type that allows you to define ### Example of Enums in Your Schema -If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. @@ -65,7 +65,7 @@ Enums provide type safety, minimize typo risks, and ensure consistent and reliab > Note: The following guide uses the CryptoCoven NFT smart contract. -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Using Enums for NFT Marketplaces -Once defined, enums can be used throughout your subgraph to categorize transactions or events. +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. @@ -269,6 +269,6 @@ Expected output includes the marketplaces that meet the criteria, each represent } ``` -## 其他资源 +## Additional Resources For additional information, check out this guide's [repo](https://github.com/chidubemokeke/Subgraph-Tutorial-Enums). From 4fbb34ce62067e02b6d0a5c16b895a286a2df203 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:18:59 -0500 Subject: [PATCH 0599/1789] New translations enums.mdx (Urdu (Pakistan)) --- website/src/pages/ur/subgraphs/cookbook/enums.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ur/subgraphs/cookbook/enums.mdx b/website/src/pages/ur/subgraphs/cookbook/enums.mdx index 8991c45dd81a..97a0c22fd89e 100644 --- a/website/src/pages/ur/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, or enumeration types, are a specific data type that allows you to define ### Example of Enums in Your Schema -If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. 
By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. @@ -65,7 +65,7 @@ Enums provide type safety, minimize typo risks, and ensure consistent and reliab > Note: The following guide uses the CryptoCoven NFT smart contract. -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Using Enums for NFT Marketplaces -Once defined, enums can be used throughout your subgraph to categorize transactions or events. +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. From c57db7893ea1f95f484884c72cfdc5d98915241b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:00 -0500 Subject: [PATCH 0600/1789] New translations enums.mdx (Vietnamese) --- website/src/pages/vi/subgraphs/cookbook/enums.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/vi/subgraphs/cookbook/enums.mdx b/website/src/pages/vi/subgraphs/cookbook/enums.mdx index a10970c1539f..9f55ae07c54b 100644 --- a/website/src/pages/vi/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, or enumeration types, are a specific data type that allows you to define ### Example of Enums in Your Schema -If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. @@ -65,7 +65,7 @@ Enums provide type safety, minimize typo risks, and ensure consistent and reliab > Note: The following guide uses the CryptoCoven NFT smart contract. -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Using Enums for NFT Marketplaces -Once defined, enums can be used throughout your subgraph to categorize transactions or events. 
+Once defined, enums can be used throughout your Subgraph to categorize transactions or events. For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. From 9e730c628bc5f50697549b7fca2121413e468c7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:01 -0500 Subject: [PATCH 0601/1789] New translations enums.mdx (Marathi) --- website/src/pages/mr/subgraphs/cookbook/enums.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/mr/subgraphs/cookbook/enums.mdx b/website/src/pages/mr/subgraphs/cookbook/enums.mdx index 081add904f9a..c2f2a41791f3 100644 --- a/website/src/pages/mr/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, or enumeration types, are a specific data type that allows you to define ### Example of Enums in Your Schema -If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. @@ -65,7 +65,7 @@ Enums provide type safety, minimize typo risks, and ensure consistent and reliab > Note: The following guide uses the CryptoCoven NFT smart contract. -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql # Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) @@ -80,7 +80,7 @@ enum Marketplace { ## Using Enums for NFT Marketplaces -Once defined, enums can be used throughout your subgraph to categorize transactions or events. +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. 
From bb41524b799be7b794cee6f6ba4a95e5cd17c52b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:02 -0500 Subject: [PATCH 0602/1789] New translations enums.mdx (Hindi) --- website/src/pages/hi/subgraphs/cookbook/enums.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/hi/subgraphs/cookbook/enums.mdx b/website/src/pages/hi/subgraphs/cookbook/enums.mdx index 3c588eace670..5721d23638de 100644 --- a/website/src/pages/hi/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/hi/subgraphs/cookbook/enums.mdx @@ -10,7 +10,7 @@ Enums, या enumeration types, एक विशिष्ट डेटा प ### अपने Schema में Enums का उदाहरण -यदि आप एक subgraph बना रहे हैं जो एक मार्केटप्लेस पर टोकन के स्वामित्व इतिहास को ट्रैक करता है, तो प्रत्येक टोकन विभिन्न स्वामित्वों से गुजर सकता है, जैसे कि OriginalOwner, SecondOwner, और ThirdOwner। enums का उपयोग करके, आप इन विशिष्ट स्वामित्वों को परिभाषित कर सकते हैं, यह सुनिश्चित करते हुए कि केवल पूर्वनिर्धारित मान ही सौंपे जाएं। +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. आप अपनी स्कीमा में एन्सम्स (enums) को परिभाषित कर सकते हैं, और एक बार परिभाषित हो जाने के बाद, आप एन्सम के मानों की स्ट्रिंग प्रस्तुति का उपयोग करके एक एन्सम फ़ील्ड को एक entities पर सेट कर सकते हैं। @@ -65,10 +65,10 @@ Enums प्रकार सुरक्षा प्रदान करते > नोट: निम्नलिखित guide CryptoCoven NFT स्मार्ट कॉन्ट्रैक्ट का उपयोग करती है। -NFTs जहां ट्रेड होते हैं, उन विभिन्न मार्केटप्लेस के लिए enums को परिभाषित करने के लिए, अपने Subgraph स्कीमा में निम्नलिखित का उपयोग करें: +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: ```gql -#मार्केटप्लेस के लिए Enum जो CryptoCoven कॉन्ट्रैक्ट के साथ इंटरएक्टेड हैं (संभवत: ट्रेड/मिंट) +#मार्केटप्लेस के लिए Enum जो CryptoCoven कॉन्ट्रैक्ट के साथ इंटरएक्टेड हैं (संभवत: ट्रेड/मिंट) enum Marketplace { OpenSeaV1 # जब CryptoCoven NFT को इस बाजार में व्यापार किया जाता है OpenSeaV2 # जब CryptoCoven NFT को OpenSeaV2 बाजार में व्यापार किया जाता है @@ -80,7 +80,7 @@ enum Marketplace { ## NFT Marketplaces के लिए Enums का उपयोग -एक बार परिभाषित होने पर, enums का उपयोग आपके subgraph में transactions या events को श्रेणीबद्ध करने के लिए किया जा सकता है। +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. उदाहरण के लिए, जब logging NFT बिक्री लॉग करते हैं, तो आप ट्रेड में शामिल मार्केटप्लेस को enum का उपयोग करके निर्दिष्ट कर सकते हैं। From ff1f455892ea2a7b4c322b624c73e81c785c2091 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:03 -0500 Subject: [PATCH 0603/1789] New translations enums.mdx (Swahili) --- .../src/pages/sw/subgraphs/cookbook/enums.mdx | 274 ++++++++++++++++++ 1 file changed, 274 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/cookbook/enums.mdx diff --git a/website/src/pages/sw/subgraphs/cookbook/enums.mdx b/website/src/pages/sw/subgraphs/cookbook/enums.mdx new file mode 100644 index 000000000000..9f55ae07c54b --- /dev/null +++ b/website/src/pages/sw/subgraphs/cookbook/enums.mdx @@ -0,0 +1,274 @@ +--- +title: Categorize NFT Marketplaces Using Enums +--- + +Use Enums to make your code cleaner and less error-prone. Here's a full example of using Enums on NFT marketplaces. 
+ +## What are Enums? + +Enums, or enumeration types, are a specific data type that allows you to define a set of specific, allowed values. + +### Example of Enums in Your Schema + +If you're building a Subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. + +You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. + +Here's what an enum definition might look like in your schema, based on the example above: + +```graphql +enum TokenStatus { + OriginalOwner + SecondOwner + ThirdOwner +} +``` + +This means that when you use the `TokenStatus` type in your schema, you expect it to be exactly one of predefined values: `OriginalOwner`, `SecondOwner`, or `ThirdOwner`, ensuring consistency and validity. + +To learn more about enums, check out [Creating a Subgraph](/developing/creating-a-subgraph/#enums) and [GraphQL documentation](https://graphql.org/learn/schema/#enumeration-types). + +## Benefits of Using Enums + +- **Clarity:** Enums provide meaningful names for values, making data easier to understand. +- **Validation:** Enums enforce strict value definitions, preventing invalid data entries. +- **Maintainability:** When you need to change or add new categories, enums allow you to do this in a focused manner. + +### Without Enums + +If you choose to define the type as a string instead of using an Enum, your code might look like this: + +```graphql +type Token @entity { + id: ID! + tokenId: BigInt! + owner: Bytes! # Owner of the token + tokenStatus: String! # String field to track token status + timestamp: BigInt! +} +``` + +In this schema, `TokenStatus` is a simple string with no specific, allowed values. + +#### Why is this a problem? + +- There's no restriction of `TokenStatus` values, so any string can be accidentally assigned. This makes it hard to ensure that only valid statuses like `OriginalOwner`, `SecondOwner`, or `ThirdOwner` are set. +- It's easy to make typos such as `Orgnalowner` instead of `OriginalOwner`, making the data and potential queries unreliable. + +### With Enums + +Instead of assigning free-form strings, you can define an enum for `TokenStatus` with specific values: `OriginalOwner`, `SecondOwner`, or `ThirdOwner`. Using an enum ensures only allowed values are used. + +Enums provide type safety, minimize typo risks, and ensure consistent and reliable results. + +## Defining Enums for NFT Marketplaces + +> Note: The following guide uses the CryptoCoven NFT smart contract. + +To define enums for the various marketplaces where NFTs are traded, use the following in your Subgraph schema: + +```gql +# Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) +enum Marketplace { + OpenSeaV1 # Represents when a CryptoCoven NFT is traded on the marketplace + OpenSeaV2 # Represents when a CryptoCoven NFT is traded on the OpenSeaV2 marketplace + SeaPort # Represents when a CryptoCoven NFT is traded on the SeaPort marketplace + LooksRare # Represents when a CryptoCoven NFT is traded on the LookRare marketplace + # ...and other marketplaces +} +``` + +## Using Enums for NFT Marketplaces + +Once defined, enums can be used throughout your Subgraph to categorize transactions or events. 
+ +For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. + +### Implementing a Function for NFT Marketplaces + +Here's how you can implement a function to retrieve the marketplace name from the enum as a string: + +```ts +export function getMarketplaceName(marketplace: Marketplace): string { + // Using if-else statements to map the enum value to a string + if (marketplace === Marketplace.OpenSeaV1) { + return 'OpenSeaV1' // If the marketplace is OpenSea, return its string representation + } else if (marketplace === Marketplace.OpenSeaV2) { + return 'OpenSeaV2' + } else if (marketplace === Marketplace.SeaPort) { + return 'SeaPort' // If the marketplace is SeaPort, return its string representation + } else if (marketplace === Marketplace.LooksRare) { + return 'LooksRare' // If the marketplace is LooksRare, return its string representation + // ... and other market places + } +} +``` + +## Best Practices for Using Enums + +- **Consistent Naming:** Use clear, descriptive names for enum values to improve readability. +- **Centralized Management:** Keep enums in a single file for consistency. This makes enums easier to update and ensures they are the single source of truth. +- **Documentation:** Add comments to enum to clarify their purpose and usage. + +## Using Enums in Queries + +Enums in queries help you improve data quality and make your results easier to interpret. They function as filters and response elements, ensuring consistency and reducing errors in marketplace values. + +**Specifics** + +- **Filtering with Enums:** Enums provide clear filters, allowing you to confidently include or exclude specific marketplaces. +- **Enums in Responses:** Enums guarantee that only recognized marketplace names are returned, making the results standardized and accurate. + +### Sample Queries + +#### Query 1: Account With The Highest NFT Marketplace Interactions + +This query does the following: + +- It finds the account with the highest unique NFT marketplace interactions, which is great for analyzing cross-marketplace activity. +- The marketplaces field uses the marketplace enum, ensuring consistent and validated marketplace values in the response. + +```gql +{ + accounts(first: 1, orderBy: uniqueMarketplacesCount, orderDirection: desc) { + id + sendCount + receiveCount + totalSpent + uniqueMarketplacesCount + marketplaces { + marketplace # This field returns the enum value representing the marketplace + } + } +} +``` + +#### Returns + +This response provides account details and a list of unique marketplace interactions with enum values for standardized clarity: + +```gql +{ + "data": { + "accounts": [ + { + "id": "0xb3abc96cb9a61576c03c955d75b703a890a14aa0", + "sendCount": "44", + "receiveCount": "44", + "totalSpent": "1197500000000000000", + "uniqueMarketplacesCount": "7", + "marketplaces": [ + { + "marketplace": "OpenSeaV1" + }, + { + "marketplace": "OpenSeaV2" + }, + { + "marketplace": "GenieSwap" + }, + { + "marketplace": "CryptoCoven" + }, + { + "marketplace": "Unknown" + }, + { + "marketplace": "LooksRare" + }, + { + "marketplace": "NFTX" + } + ] + } + ] + } +} +``` + +#### Query 2: Most Active Marketplace for CryptoCoven transactions + +This query does the following: + +- It identifies the marketplace with the highest volume of CryptoCoven transactions. +- It uses the marketplace enum to ensure that only valid marketplace types appear in the response, adding reliability and consistency to your data. 
+ +```gql +{ + marketplaceInteractions(first: 1, orderBy: transactionCount, orderDirection: desc) { + marketplace + transactionCount + } +} +``` + +#### Result 2 + +The expected response includes the marketplace and the corresponding transaction count, using the enum to indicate the marketplace type: + +```gql +{ + "data": { + "marketplaceInteractions": [ + { + "marketplace": "Unknown", + "transactionCount": "222" + } + ] + } +} +``` + +#### Query 3: Marketplace Interactions with High Transaction Counts + +This query does the following: + +- It retrieves the top four marketplaces with over 100 transactions, excluding "Unknown" marketplaces. +- It uses enums as filters to ensure that only valid marketplace types are included, increasing accuracy. + +```gql +{ + marketplaceInteractions( + first: 4 + orderBy: transactionCount + orderDirection: desc + where: { transactionCount_gt: "100", marketplace_not: "Unknown" } + ) { + marketplace + transactionCount + } +} +``` + +#### Result 3 + +Expected output includes the marketplaces that meet the criteria, each represented by an enum value: + +```gql +{ + "data": { + "marketplaceInteractions": [ + { + "marketplace": "NFTX", + "transactionCount": "201" + }, + { + "marketplace": "OpenSeaV1", + "transactionCount": "148" + }, + { + "marketplace": "CryptoCoven", + "transactionCount": "117" + }, + { + "marketplace": "OpenSeaV1", + "transactionCount": "111" + } + ] + } +} +``` + +## Additional Resources + +For additional information, check out this guide's [repo](https://github.com/chidubemokeke/Subgraph-Tutorial-Enums). From a15264af4da9ae20fa48a37998b835d379f184b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:04 -0500 Subject: [PATCH 0604/1789] New translations grafting.mdx (Romanian) --- .../pages/ro/subgraphs/cookbook/grafting.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/ro/subgraphs/cookbook/grafting.mdx b/website/src/pages/ro/subgraphs/cookbook/grafting.mdx index 57d5169830a7..0a0d7b3bb370 100644 --- a/website/src/pages/ro/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/ro/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: Replace a Contract and Keep its History With Grafting --- -In this guide, you will learn how to build and deploy new subgraphs by grafting existing subgraphs. +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## What is Grafting? -Grafting reuses the data from an existing subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. Also, it can be used when adding a feature to a subgraph that takes long to index from scratch. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. 
It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - It adds or removes entity types - It removes attributes from entity types @@ -22,35 +22,35 @@ For more information, you can check: - [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Important Note on Grafting When Upgrading to the Network -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Why Is This Important? -Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio. +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Best Practices -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. By adhering to these guidelines, you minimize risks and ensure a smoother migration process. ## Building an Existing Subgraph -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). 
+> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## Subgraph Manifest Definition -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## Grafting Manifest Definition -Grafting requires adding two new items to the original subgraph manifest: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## Deploying the Base Subgraph -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ It returns something like this: } ``` -Once you have verified the subgraph is indexing properly, you can quickly update the subgraph with grafting. +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## Deploying the Grafting Subgraph The graft replacement subgraph.yaml will have a new contract address. This could happen when you update your dapp, redeploy a contract, etc. -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. 
These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ It should return the following: } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Congrats! You have successfully grafted a subgraph onto another subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. 
## Additional Resources From c3a8d3306be994773f1055d8c04be8184a2f5e1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:05 -0500 Subject: [PATCH 0605/1789] New translations grafting.mdx (French) --- .../pages/fr/subgraphs/cookbook/grafting.mdx | 70 +++++++++---------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/grafting.mdx b/website/src/pages/fr/subgraphs/cookbook/grafting.mdx index a81cf0ddf30a..24733b98c98c 100644 --- a/website/src/pages/fr/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: Remplacer un contrat et conserver son historique grâce au « greffage » --- -Dans ce guide, vous apprendrez à construire et à déployer de nouveaux subgraphs en utilisant le greffage sur des subgraphs existants. +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## Qu'est-ce qu'une greffe ? -C'est une méthode qui réutilise les données d'un subgraph existant et commence à les indexer à un bloc ultérieur. Elle est utile lors du développement pour contourner rapidement les erreurs simples dans les mappings ou pour remettre temporairement en service un subgraph existant qui a échoué. Elle peut également être utilisée pour ajouter une fonctionnalité à un subgraphe dont l'indexation depuis la genèse prend un temps considérable. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -Le subgraph greffé peut utiliser un schema GraphQL qui n'est pas identique à celui du subgraph de base, mais simplement compatible avec lui. Il doit s'agir d'un schema de subgraph valide en tant que tel, mais il peut s'écarter du schema du subgraph de base de la manière suivante : +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - Il ajoute ou supprime des types d'entité - Il supprime les attributs des types d'entité @@ -20,37 +20,37 @@ Le subgraph greffé peut utiliser un schema GraphQL qui n'est pas identique à c Pour plus d’informations, vous pouvez vérifier : -- [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) +- [Greffage](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -Dans ce tutoriel, nous couvrirons un cas d'utilisation de base. Nous remplacerons un contrat existant par un contrat identique (avec une nouvelle adresse, mais le même code). Ensuite, nous grefferons le subgraph existant sur le subgraph "de base" qui suit le nouveau contrat. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. 
## Remarque importante sur le greffage lors de la mise à niveau vers le réseau -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Pourquoi est-ce important? -Le greffage est une fonctionnalité puissante qui vous permet de "greffer" un subgraph sur un autre, transférant efficacement les données historiques du subgraph existant vers une nouvelle version. Il n'est pas possible de greffer un subgraph de The Graph Network vers Subgraph Studio. +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Les meilleures pratiques -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. En respectant ces lignes directrices, vous minimisez les risques et vous vous assurez que le processus de migration se déroule sans heurts. ## Création d'un subgraph existant -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: -- [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) +- [Dépôt d'exemples de subgraphs](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## Définition du manifeste du subgraph -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -79,33 +79,33 @@ dataSources: file: ./src/lock.ts ``` -- The `Lock` data source is the abi and contract address we will get when we compile and deploy the contract -- The network should correspond to an indexed network being queried. 
Since we're running on Sepolia testnet, the network is `sepolia` -- The `mapping` section defines the triggers of interest and the functions that should be run in response to those triggers. In this case, we are listening for the `Withdrawal` event and calling the `handleWithdrawal` function when it is emitted. +- La source de données `Lock` est l'adresse de l'abi et du contrat que nous obtiendrons lorsque nous compilerons et déploierons le contrat +- Le réseau doit correspondre à un réseau indexé qui est interrogé. Comme nous fonctionnons sur le réseau de test Sepolia, le réseau est `sepolia` +- La section `mapping` définit les déclencheurs intéressants et les fonctions qui doivent être exécutées en réponse à ces déclencheurs. Dans ce cas, nous écoutons l'événement `Withdrawal` et appelons la fonction `handleWithdrawal` lorsqu'il est émis. ## Définition de manifeste de greffage -Le greffage nécessite l'ajout de deux nouveaux éléments au manifeste du subgraph original : +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - - grafting # nom de la fonctionnalité + - grafting # feature name graft: - base: Qm... # ID du subgraph de base - block: 5956000 # numéro du bloc + base: Qm... # Subgraph ID of base Subgraph + block: 5956000 # block number ``` -- `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `features:` est une liste de tous les [noms de fonctionnalités](/developing/creating-a-subgraph/#experimental-features) utilisées. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## Déploiement du subgraph de base -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. Une fois terminé, vérifiez que le subgraph s'indexe correctement. Si vous exécutez la commande suivante dans The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ Cela renvoie quelque chose comme ceci : } ``` -Une fois que vous avez vérifié que le subgraph s'indexe correctement, vous pouvez rapidement le mettre à jour grâce à la méthode du graffage. +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. 
## Déploiement du subgraph greffé Le subgraph.yaml de remplacement du greffon aura une nouvelle adresse de contrat. Cela peut arriver lorsque vous mettez à jour votre dapp, redéployez un contrat, etc. -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. Une fois terminé, vérifiez que le subgraph s'indexe correctement. Si vous exécutez la commande suivante dans The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ Le résultat devrait être le suivant : } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Félicitations ! 
Vous avez réussi à greffer un subgraph sur un autre subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. ## Ressources supplémentaires @@ -197,6 +197,6 @@ Si vous souhaitez acquérir plus d'expérience avec le greffage, voici quelques - [ERC-721](https://github.com/messari/subgraphs/blob/master/subgraphs/erc721-metadata/subgraph.yaml) - [Uniswap](https://github.com/messari/subgraphs/blob/master/subgraphs/uniswap-v3-forks/protocols/uniswap-v3/config/templates/uniswapV3Template.yaml), -To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. Alternatives like [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates) can achieve similar results +Pour devenir encore plus expert sur The Graph, vous pouvez vous familiariser avec d'autres méthodes de gestion des modifications apportées aux sources de données sous-jacentes. Des alternatives comme des [Modèles de sources de données](/developing/creating-a-subgraph/#data-source-templates) permettent d'obtenir des résultats similaires -> Note: A lot of material from this article was taken from the previously published [Arweave article](/subgraphs/cookbook/arweave/) +> Note : De nombreux éléments de cet article ont été repris de l'article [Arweave](/subgraphs/cookbook/arweave/) publié précédemment From c49ac2f21f20d44f67378562a3059ea64c9380b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:07 -0500 Subject: [PATCH 0606/1789] New translations grafting.mdx (Spanish) --- .../pages/es/subgraphs/cookbook/grafting.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/es/subgraphs/cookbook/grafting.mdx b/website/src/pages/es/subgraphs/cookbook/grafting.mdx index 4a98c7ab352b..1e20b8802154 100644 --- a/website/src/pages/es/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/es/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: Reemplazar un contrato y mantener su historia con el grafting --- -En esta guía, aprenderás a construir y deployar nuevos subgrafos mediante grafting (injerto) de subgrafos existentes. +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## ¿Qué es el Grafting? -El grafting reutiliza los datos de un subgrafo existente y comienza a indexarlo en un bloque posterior. Esto es útil durante el desarrollo para superar rápidamente errores simples en los mapeos o para hacer funcionar temporalmente un subgrafo existente después de que haya fallado. También se puede utilizar cuando se añade un feature a un subgrafo que tarda en indexarse desde cero. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -El subgrafo grafteado puede utilizar un esquema GraphQL que no es idéntico al del subgrafo base, sino simplemente compatible con él. Tiene que ser un esquema de subgrafo válido por sí mismo, pero puede diferir del esquema del subgrafo base de las siguientes maneras: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. 
It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - Agrega o elimina tipos de entidades - Elimina los atributos de los tipos de entidad @@ -22,35 +22,35 @@ Para más información, puedes consultar: - [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Important Note on Grafting When Upgrading to the Network -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Why Is This Important? -Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio. +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Best Practices -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. By adhering to these guidelines, you minimize risks and ensure a smoother migration process. ## Construcción de un subgrafo existente -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). 
## Definición de manifiesto del subgrafo -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## Definición del manifiesto de grafting -El grafting requiere añadir dos nuevos items al manifiesto original del subgrafo: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## Deploy del subgrafo base -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. Una vez que hayas terminado, verifica que el subgrafo se está indexando correctamente. Si ejecutas el siguiente comando en The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ Devuelve algo como esto: } ``` -Una vez que hayas verificado que el subgrafo se está indexando correctamente, puedes actualizar rápidamente el subgrafo con grafting. +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## Deploy del subgrafo grafting El subgraph.yaml de sustitución del graft tendrá una nueva dirección de contrato. Esto podría ocurrir cuando actualices tu dApp, vuelvas a deployar un contrato, etc. -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. 
These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. Una vez que hayas terminado, verifica que el subgrafo se está indexando correctamente. Si ejecutas el siguiente comando en The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ Debería devolver lo siguiente: } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Congrats! You have successfully grafted a subgraph onto another subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. 
## Recursos Adicionales From 3466990c678379b13eb9945538ef5d6929e9d162 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:08 -0500 Subject: [PATCH 0607/1789] New translations grafting.mdx (Arabic) --- .../pages/ar/subgraphs/cookbook/grafting.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/ar/subgraphs/cookbook/grafting.mdx b/website/src/pages/ar/subgraphs/cookbook/grafting.mdx index 704e7df3f3f6..ef7c8fc242d7 100644 --- a/website/src/pages/ar/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: Replace a Contract and Keep its History With Grafting --- -In this guide, you will learn how to build and deploy new subgraphs by grafting existing subgraphs. +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## What is Grafting? -Grafting reuses the data from an existing subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. Also, it can be used when adding a feature to a subgraph that takes long to index from scratch. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - يضيف أو يزيل أنواع الكيانات - يزيل الصفات من أنواع الكيانات @@ -22,35 +22,35 @@ For more information, you can check: - [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Important Note on Grafting When Upgrading to the Network -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Why Is This Important? -Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio. 
+Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Best Practices -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. By adhering to these guidelines, you minimize risks and ensure a smoother migration process. ## Building an Existing Subgraph -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## تعريف Subgraph Manifest -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## Grafting Manifest Definition -Grafting requires adding two new items to the original subgraph manifest: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. 
The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## Deploying the Base Subgraph -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ It returns something like this: } ``` -Once you have verified the subgraph is indexing properly, you can quickly update the subgraph with grafting. +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## Deploying the Grafting Subgraph The graft replacement subgraph.yaml will have a new contract address. This could happen when you update your dapp, redeploy a contract, etc. -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. 
If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ It should return the following: } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Congrats! You have successfully grafted a subgraph onto another subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. ## مصادر إضافية From e0c65ce0582def98492135d5bea267b086577748 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:09 -0500 Subject: [PATCH 0608/1789] New translations grafting.mdx (Czech) --- .../pages/cs/subgraphs/cookbook/grafting.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/grafting.mdx b/website/src/pages/cs/subgraphs/cookbook/grafting.mdx index ca0ab0367451..856b8ea2ea34 100644 --- a/website/src/pages/cs/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: Nahrazení smlouvy a zachování její historie pomocí roubování --- -V této příručce se dozvíte, jak vytvářet a nasazovat nové podgrafy roubováním stávajících podgrafů. +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## Co je to roubování? -Při roubování se znovu použijí data z existujícího podgrafu a začne se indexovat v pozdějším bloku. To je užitečné během vývoje, abyste se rychle dostali přes jednoduché chyby v mapování nebo abyste dočasně znovu zprovoznili existující podgraf po jeho selhání. Také ji lze použít při přidávání funkce do podgrafu, které trvá dlouho, než se indexuje od začátku. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -Štěpovaný podgraf může používat schéma GraphQL, které není totožné se schématem základního podgrafu, ale je s ním pouze kompatibilní. 
Musí to být platné schéma podgrafu jako takové, ale může se od schématu základního podgrafu odchýlit následujícími způsoby: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - Přidává nebo odebírá typy entit - Odstraňuje atributy z typů entit @@ -22,35 +22,35 @@ Další informace naleznete na: - [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Důležité upozornění k roubování při aktualizaci na síť -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Proč je to důležité? -Štěpování je výkonná funkce, která umožňuje "naroubovat" jeden podgraf na druhý, čímž efektivně přenese historická data ze stávajícího podgrafu do nové verze. Podgraf není možné naroubovat ze Sítě grafů zpět do Podgraf Studio. +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Osvědčené postupy -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. Dodržováním těchto pokynů minimalizujete rizika a zajistíte hladší průběh migrace. ## Vytvoření existujícího podgrafu -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). 
## Definice podgrafu Manifest -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## Definice manifestu roubování -Roubování vyžaduje přidání dvou nových položek do původního manifestu podgrafu: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## Nasazení základního podgrafu -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. Po dokončení ověřte, zda se podgraf správně indexuje. Pokud spustíte následující příkaz v The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ Vrátí něco takového: } ``` -Jakmile ověříte, že se podgraf správně indexuje, můžete jej rychle aktualizovat pomocí roubování. +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## Nasazení podgrafu roubování Náhradní podgraf.yaml bude mít novou adresu smlouvy. K tomu může dojít při aktualizaci dapp, novém nasazení kontraktu atd. -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. 
The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. Po dokončení ověřte, zda se podgraf správně indexuje. Pokud spustíte následující příkaz v The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ Měla by vrátit následující: } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Gratulujeme! Úspěšně jste naroubovali podgraf na jiný podgraf. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. 
## Další zdroje From a2c8f89039ecbd26e662f0127a52f8a4ebea0b65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:10 -0500 Subject: [PATCH 0609/1789] New translations grafting.mdx (German) --- .../pages/de/subgraphs/cookbook/grafting.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/de/subgraphs/cookbook/grafting.mdx b/website/src/pages/de/subgraphs/cookbook/grafting.mdx index ee92710b3059..7474e4fe759d 100644 --- a/website/src/pages/de/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/de/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: Replace a Contract and Keep its History With Grafting --- -In this guide, you will learn how to build and deploy new subgraphs by grafting existing subgraphs. +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## What is Grafting? -Grafting reuses the data from an existing subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. Also, it can be used when adding a feature to a subgraph that takes long to index from scratch. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - It adds or removes entity types - It removes attributes from entity types @@ -22,35 +22,35 @@ For more information, you can check: - [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Important Note on Grafting When Upgrading to the Network -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Why Is This Important? -Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio. 
+Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Best Practices -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. By adhering to these guidelines, you minimize risks and ensure a smoother migration process. ## Building an Existing Subgraph -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## Subgraf-Manifest-Definition -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## Grafting Manifest Definition -Grafting requires adding two new items to the original subgraph manifest: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. 
The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## Deploying the Base Subgraph -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ It returns something like this: } ``` -Once you have verified the subgraph is indexing properly, you can quickly update the subgraph with grafting. +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## Deploying the Grafting Subgraph The graft replacement subgraph.yaml will have a new contract address. This could happen when you update your dapp, redeploy a contract, etc. -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. 
If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ It should return the following: } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Congrats! You have successfully grafted a subgraph onto another subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. ## Zusätzliche Ressourcen From dd3f13494e4841b8749f1affcac76c5824a1896e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:11 -0500 Subject: [PATCH 0610/1789] New translations grafting.mdx (Italian) --- .../pages/it/subgraphs/cookbook/grafting.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/it/subgraphs/cookbook/grafting.mdx b/website/src/pages/it/subgraphs/cookbook/grafting.mdx index 57d5169830a7..0a0d7b3bb370 100644 --- a/website/src/pages/it/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/it/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: Replace a Contract and Keep its History With Grafting --- -In this guide, you will learn how to build and deploy new subgraphs by grafting existing subgraphs. +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## What is Grafting? -Grafting reuses the data from an existing subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. Also, it can be used when adding a feature to a subgraph that takes long to index from scratch. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. 
It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - It adds or removes entity types - It removes attributes from entity types @@ -22,35 +22,35 @@ For more information, you can check: - [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Important Note on Grafting When Upgrading to the Network -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Why Is This Important? -Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio. +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Best Practices -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. By adhering to these guidelines, you minimize risks and ensure a smoother migration process. ## Building an Existing Subgraph -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). 
+> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## Subgraph Manifest Definition -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## Grafting Manifest Definition -Grafting requires adding two new items to the original subgraph manifest: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## Deploying the Base Subgraph -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ It returns something like this: } ``` -Once you have verified the subgraph is indexing properly, you can quickly update the subgraph with grafting. +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## Deploying the Grafting Subgraph The graft replacement subgraph.yaml will have a new contract address. This could happen when you update your dapp, redeploy a contract, etc. -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. 
These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ It should return the following: } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Congrats! You have successfully grafted a subgraph onto another subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. 
## Additional Resources From 81660f5328dd62804b3356e83249969bb815018d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:12 -0500 Subject: [PATCH 0611/1789] New translations grafting.mdx (Japanese) --- .../pages/ja/subgraphs/cookbook/grafting.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/ja/subgraphs/cookbook/grafting.mdx b/website/src/pages/ja/subgraphs/cookbook/grafting.mdx index 0be8b13c8dbd..8a65183f274f 100644 --- a/website/src/pages/ja/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: グラフティングでコントラクトを取り替え、履歴を残す --- -このガイドでは、既存のサブグラフをグラフティングして新しいサブグラフを構築し、配備する方法を学びます。 +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## グラフティングとは? -グラフティングは、既存のサブグラフからデータを再利用し、後のブロックからインデックスを作成します。これは、開発中にマッピングの単純なエラーを素早く乗り越えるため、または、既存のサブグラフが失敗した後に一時的に再び動作させるために有用です。また、ゼロからインデックスを作成するのに時間がかかる機能をサブグラフに追加する場合にも使用できます。 +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -グラフト化されたサブグラフは、ベースとなるサブグラフのスキーマと同一ではなく、単に互換性のある GraphQL スキーマを使用することができます。また、それ自体は有効なサブグラフのスキーマでなければなりませんが、以下の方法でベースサブグラフのスキーマから逸脱することができます。 +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - エンティティタイプを追加または削除する - エンティティタイプから属性を削除する @@ -22,35 +22,35 @@ title: グラフティングでコントラクトを取り替え、履歴を残 - [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## ネットワークにアップグレードする際の移植に関する重要な注意事項 -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### 何でこれが大切ですか? -Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio. +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### ベストプラクティス -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. 
Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. これらのガイドラインに従うことで、リスクを最小限に抑え、よりスムーズな移行プロセスを確保できます。 ## 既存のサブグラフの構築 -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## サブグラフマニフェストの定義 -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## グラフティングマニフェストの定義 -グラフティングは、元のサブグラフ・マニフェストに新しい2つの項目を追加する必要があります。 +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## ベースサブグラフの起動 -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. 終了後、サブグラフが正しくインデックスされていることを確認します。The Graph Playgroundで以下のコマンドを実行すると、サブグラフが正常にインデックスされます。 +1. 
Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ The `base` and `block` values can be found by deploying two subgraphs: one for t } ``` -サブグラフが正しくインデックスされていることを確認したら、グラフティングで素早くサブグラフを更新することができます。 +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## グラフティングサブグラフの展開 グラフト置換されたsubgraph.yamlは、新しいコントラクトのアドレスを持つことになります。これは、ダンプを更新したり、コントラクトを再デプロイしたりしたときに起こりうることです。 -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. 終了後、サブグラフが正しくインデックスされていることを確認します。The Graph Playgroundで以下のコマンドを実行すると、サブグラフが正常にインデックスされます。 +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ The `base` and `block` values can be found by deploying two subgraphs: one for t } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. 
The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Congrats! You have successfully grafted a subgraph onto another subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. ## その他のリソース From 3f260daf99896a63ea8bc8afeccc591f277c178f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:13 -0500 Subject: [PATCH 0612/1789] New translations grafting.mdx (Korean) --- .../pages/ko/subgraphs/cookbook/grafting.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/ko/subgraphs/cookbook/grafting.mdx b/website/src/pages/ko/subgraphs/cookbook/grafting.mdx index 57d5169830a7..0a0d7b3bb370 100644 --- a/website/src/pages/ko/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/ko/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: Replace a Contract and Keep its History With Grafting --- -In this guide, you will learn how to build and deploy new subgraphs by grafting existing subgraphs. +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## What is Grafting? -Grafting reuses the data from an existing subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. Also, it can be used when adding a feature to a subgraph that takes long to index from scratch. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - It adds or removes entity types - It removes attributes from entity types @@ -22,35 +22,35 @@ For more information, you can check: - [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). 
Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Important Note on Grafting When Upgrading to the Network -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Why Is This Important? -Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio. +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Best Practices -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. By adhering to these guidelines, you minimize risks and ensure a smoother migration process. ## Building an Existing Subgraph -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## Subgraph Manifest Definition -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## Grafting Manifest Definition -Grafting requires adding two new items to the original subgraph manifest: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... 
# Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## Deploying the Base Subgraph -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ It returns something like this: } ``` -Once you have verified the subgraph is indexing properly, you can quickly update the subgraph with grafting. +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## Deploying the Grafting Subgraph The graft replacement subgraph.yaml will have a new contract address. This could happen when you update your dapp, redeploy a contract, etc. -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. 
These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ It should return the following: } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Congrats! You have successfully grafted a subgraph onto another subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. ## Additional Resources From 5e7477dec0848a7576efd7bed1647a353a61f491 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:14 -0500 Subject: [PATCH 0613/1789] New translations grafting.mdx (Dutch) --- .../pages/nl/subgraphs/cookbook/grafting.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/nl/subgraphs/cookbook/grafting.mdx b/website/src/pages/nl/subgraphs/cookbook/grafting.mdx index 57d5169830a7..0a0d7b3bb370 100644 --- a/website/src/pages/nl/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/nl/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: Replace a Contract and Keep its History With Grafting --- -In this guide, you will learn how to build and deploy new subgraphs by grafting existing subgraphs. +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## What is Grafting? -Grafting reuses the data from an existing subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. 
Also, it can be used when adding a feature to a subgraph that takes long to index from scratch. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - It adds or removes entity types - It removes attributes from entity types @@ -22,35 +22,35 @@ For more information, you can check: - [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Important Note on Grafting When Upgrading to the Network -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Why Is This Important? -Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio. +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Best Practices -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. By adhering to these guidelines, you minimize risks and ensure a smoother migration process. ## Building an Existing Subgraph -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). 
To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## Subgraph Manifest Definition -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## Grafting Manifest Definition -Grafting requires adding two new items to the original subgraph manifest: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## Deploying the Base Subgraph -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ It returns something like this: } ``` -Once you have verified the subgraph is indexing properly, you can quickly update the subgraph with grafting. 
+Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## Deploying the Grafting Subgraph The graft replacement subgraph.yaml will have a new contract address. This could happen when you update your dapp, redeploy a contract, etc. -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ It should return the following: } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). 
The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Congrats! You have successfully grafted a subgraph onto another subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. ## Additional Resources From ad31f657efb67dd9f84f5c2d0e40e26587f8f7cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:15 -0500 Subject: [PATCH 0614/1789] New translations grafting.mdx (Polish) --- .../pages/pl/subgraphs/cookbook/grafting.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/pl/subgraphs/cookbook/grafting.mdx b/website/src/pages/pl/subgraphs/cookbook/grafting.mdx index 57d5169830a7..0a0d7b3bb370 100644 --- a/website/src/pages/pl/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/pl/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: Replace a Contract and Keep its History With Grafting --- -In this guide, you will learn how to build and deploy new subgraphs by grafting existing subgraphs. +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## What is Grafting? -Grafting reuses the data from an existing subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. Also, it can be used when adding a feature to a subgraph that takes long to index from scratch. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - It adds or removes entity types - It removes attributes from entity types @@ -22,35 +22,35 @@ For more information, you can check: - [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Important Note on Grafting When Upgrading to the Network -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Why Is This Important? 
-Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio. +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Best Practices -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. By adhering to these guidelines, you minimize risks and ensure a smoother migration process. ## Building an Existing Subgraph -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## Subgraph Manifest Definition -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## Grafting Manifest Definition -Grafting requires adding two new items to the original subgraph manifest: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. 
+- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## Deploying the Base Subgraph -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ It returns something like this: } ``` -Once you have verified the subgraph is indexing properly, you can quickly update the subgraph with grafting. +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## Deploying the Grafting Subgraph The graft replacement subgraph.yaml will have a new contract address. This could happen when you update your dapp, redeploy a contract, etc. -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. 
If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ It should return the following: } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Congrats! You have successfully grafted a subgraph onto another subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. ## Additional Resources From 527e6ee5f031762da800b3e4fd0dfa1530769a1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:16 -0500 Subject: [PATCH 0615/1789] New translations grafting.mdx (Portuguese) --- .../pages/pt/subgraphs/cookbook/grafting.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/grafting.mdx b/website/src/pages/pt/subgraphs/cookbook/grafting.mdx index cbfc42ddc895..dc45f606ef40 100644 --- a/website/src/pages/pt/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: Como Substituir um Contrato e Manter a sua História com Enxertos --- -Neste guia, aprenda como construir e lançar novos subgraphs com o enxerto de subgraphs existentes. +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## O que é Enxerto? -O processo de enxerto reutiliza os dados de um subgraph existente e o indexa em um bloco seguinte. Isto é útil durante o desenvolvimento para rapidamente superar erros simples nos mapeamentos, ou fazer um subgraph existente funcionar temporariamente após ele ter falhado. Ele também pode ser usado ao adicionar uma característica a um subgraph que demora para ser indexado do zero. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. 
-O subgraph enxertado pode usar um schema GraphQL que não é idêntico ao schema do subgraph base, mas é apenas compatível com ele. Ele deve ser um schema válido no seu próprio mérito, mas pode desviar do schema do subgraph base nas seguintes maneiras: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - Ele adiciona ou remove tipos de entidade - Ele retira atributos de tipos de entidade @@ -22,35 +22,35 @@ Para mais informações, confira: - [Enxertos](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -Neste tutorial, cobriremos um caso de uso básico. Substituiremos um contrato existente com um contrato idêntico (com um novo endereço, mas o mesmo código). Depois, enxertaremos o subgraph existente ao subgraph "base" que rastreará o novo contrato. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Notas Importantes sobre Enxertos ao Migrar Para a Graph Network -> **Cuidado**: Não é recomendado usar enxertos para subgraphs editados na The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Qual a Importância Disto? -Isto é um recurso poderoso que permite que os programadores "enxertem" um subgraph em outro, o que, efetivamente, transfere dados históricos do subgraph existente até uma versão nova. Não é possível enxertar um subgraph da Graph Network de volta ao Subgraph Studio. +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Boas práticas -**Migração Inicial**: Ao implantar o seu subgraph pela primeira vez na rede descentralizada, faça-o sem enxertos. Verifique se o subgraph está estável e funciona como esperado. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Atualizações Subsequentes**: quando o seu subgraph estiver ativo e estável na rede descentralizada, será possível usar enxertos para versões futuras, para tornar a transição mais suave e preservar dados históricos. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. Ao aderir a estas diretrizes, dá para minimizar riscos e garantir um processo de migração mais suave. ## Como Construir um Subgraph Existente -Construir subgraphs é uma parte essencial do The Graph, descrita mais profundamente [aqui](/subgraphs/quick-start/). Para poder construir e implementar o subgraph existente usado neste tutorial, veja o seguinte repositório: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). 
To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Exemplo de repositório de subgraph](https://github.com/Shiyasmohd/grafting-tutorial) -> Nota: O contrato usado no subgraph foi tirado do seguinte [Kit para Iniciantes de Hackathon](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## Definição de Manifest de Subgraph -O manifest do subgraph `subgraph.yaml` identifica as fontes de dados para o subgraph, os gatilhos de interesse, e as funções que devem ser executadas em resposta a esses gatilhos. Veja abaixo um exemplo de manifesto de subgraph para usar: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## Definição de Manifest de Enxertos -Enxertos exigem a adição de dois novos itens ao manifest do subgraph original: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` é uma lista de todos os [nomes de função](/developing/creating-a-subgraph/#experimental-features) usados. -- `graft:` é um mapa do subgraph `base` e ​​do bloco para enxertar. `block` é o número do bloco para começar a indexação. The Graph copiará os dados do subgraph base até, e incluindo, o bloco fornecido, e então continuará a indexar o novo subgraph a partir desse bloco em diante. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -Os valores `base` e ​​`block` podem ser encontrados com a implantação de dois subgraphs: um para indexação de base e outro com enxerto +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## Como Lançar o Subgraph Base -1. Vá para o [Subgraph Studio](https://thegraph.com/studio/) e crie um subgraph na testnet da Sepolia chamado `graft-example` -2. Siga as direções na seção `AUTH & DEPLOY` na sua página de subgraph, na pasta `graft-example` do repositório -3. Ao terminar, verifique que o subgraph está a indexar corretamente. Se executar o seguinte comando no The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ Ele deve retornar algo assim: } ``` -Após verificar que o subgraph está a indexar corretamente, será possível atualizar rapidamente o subgraph com o enxerto. +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## Como Lançar o Subgraph de Enxerto O subgraph.yaml do substituto terá um novo endereço de contrato. 
Isto pode acontecer quando atualizar o seu dapp, relançar um contrato, etc. -1. Vá para o [Subgraph Studio](https://thegraph.com/studio/) e crie um subgraph na testnet da Sepolia chamado `graft-replacement` -2. Crie um novo manifesto. O `subgraph.yaml` para `graph-replacement` contém um endereço de contrato diferente e novas informações sobre como ele deve enxertar. Estes são o `block` do [último evento importante emitido](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) pelo contrato antigo, e o `base` do subgraph antigo. A ID de subgraph `base` é a `Deployment ID` do seu subgraph `graph-example` original. Você pode encontrá-la no Subgraph Studio. -3. Siga as instruções na seção `AUTH & DEPLOY` da sua página de subgraph, na pasta `graft-replacement` do repositório -4. Ao terminar, verifique que o subgraph está a indexar corretamente. Se executar o seguinte comando no The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ Ele deve retornar algo como: } ``` -Repare que o subgraph `graft-replacement` está a indexar a partir de dados `graph-example` mais antigos e dados mais novos do novo endereço de contrato. O contrato original emitiu dois eventos `Withdrawal`: [Evento 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) e [Evento 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). O novo contrato emitiu um `Withdrawal` após, [Evento 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). As duas transações indexadas anteriormente (Evento 1 e 2) e a nova transação (Evento 3) foram combinadas no subgraph `graft-replacement`. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Parabéns! Enxertaste um subgraph em outro subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. 
## Outros Recursos From c624d1d3c780ce030d519aa998a0e07cb95b29b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:17 -0500 Subject: [PATCH 0616/1789] New translations grafting.mdx (Russian) --- .../pages/ru/subgraphs/cookbook/grafting.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/grafting.mdx b/website/src/pages/ru/subgraphs/cookbook/grafting.mdx index 8605468ff4e7..ee4c1e36097e 100644 --- a/website/src/pages/ru/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: Замените контракт и сохраните его историю с помощью Grafting --- -Из этого руководства Вы узнаете, как создавать и развертывать новые субграфы путем графтинга (переноса) существующих субграфов. +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## Что такое Grafting? -При графтинге (переносе) повторно используются данные из существующего субрафа и начинается их индексация в более позднем блоке. Это может быть полезно в период разработки, чтобы быстро устранить простые ошибки в мэппинге или временно восстановить работу существующего субграфа после его сбоя. Кроме того, его можно использовать при добавлении в субграф функции, индексация которой с нуля занимает много времени. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -Перенесённый субграф может использовать схему GraphQL, которая не идентична схеме базового субграфа, а просто совместима с ней. Это должна быть автономно действующая схема субграфа, но она может отличаться от схемы базового субграфа следующим образом: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - Она добавляет или удаляет типы объектов - Она удаляет атрибуты из типов объектов @@ -22,35 +22,35 @@ title: Замените контракт и сохраните его истор - [Графтинг](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -В этом руководстве мы рассмотрим базовый случай использования. Мы заменим существующий контракт идентичным (с новым адресом, но с тем же кодом). Затем подключим существующий субграф к "базовому" субграфу, который отслеживает новый контракт. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Важное примечание о Grafting при обновлении до сети -> **Предупреждение**: Рекомендуется не использовать графтинг для субграфов, опубликованных в сети The Graph +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Почему это важно? -Grafting — это мощная функция, которая позволяет «переносить» один субграф в другой, фактически перенося исторические данные из существующего субграфа в новую версию. Однако перенос субграфа из The Graph Network обратно в Subgraph Studio невозможен. 
+Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Лучшие практики -**Первоначальная миграция**: при первом развертывании субграфа в децентрализованной сети рекомендуется не использовать графтинг. Убедитесь, что субграф стабилен и работает должным образом. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Последующие обновления**: когда Ваш субграф будет развернут и стабилен в децентрализованной сети, Вы можете использовать графтинг для будущих версий, чтобы облегчить переход и сохранить исторические данные. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. Соблюдая эти рекомендации, Вы минимизируете риски и обеспечите более плавный процесс миграции. ## Создание существующего субграфа -Создание субграфов — это важная часть работы с The Graph, более подробно описанная [здесь](/subgraphs/quick-start/). Для того чтобы иметь возможность создать и развернуть существующий субграф, используемый в этом руководстве, предоставлен следующий репозиторий: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Пример репозитория субграфа](https://github.com/Shiyasmohd/grafting-tutorial) -> Примечание: Контракт, используемый в субграфе, был взят из следующего [стартового набора Hackathon](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## Определение манифеста субграфа -Манифест субграфа `subgraph.yaml` определяет источники данных для субграфа, триггеры, которые представляют интерес, и функции, которые должны быть выполнены в ответ на эти триггеры. Ниже приведен пример манифеста субграфа, который Вы будете использовать: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## Определение Манифеста Grafting -Grafting требует добавления двух новых элементов в исходный манифест субграфа: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - - grafting # наименование функции + - grafting # feature name graft: - base: Qm... # идентификатор субграфа базового субграфа - block: 5956000 # номер блока + base: Qm... # Subgraph ID of base Subgraph + block: 5956000 # block number ``` - `features:` — это список всех используемых [имен функций](/developing/creating-a-subgraph/#experimental-features). -- `graft:` — это отображение базового субграфа и блока, к которому применяется графтинг (перенос). `block` — это номер блока, с которого нужно начать индексирование. The Graph скопирует данные из базового субграфа до указанного блока включительно, а затем продолжит индексирование нового субграфа с этого блока. 
+- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -Значения `base` и `block` можно найти, развернув два субграфа: один для базового индексирования и один с графтингом +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## Развертывание базового субграфа -1. Перейдите в [Subgraph Studio](https://thegraph.com/studio/) и создайте субграф в тестнете Sepolia с названием `graft-example` -2. Следуйте инструкциям в разделе `AUTH & DEPLOY` на странице своего субграфа в папке `graft-example` репозитория -3. После завершения убедитесь, что субграф правильно индексируется. Если Вы запустите следующую команду в The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ graft: } ``` -Убедившись, что субграф индексируется правильно, Вы можете быстро обновить его с помощью графтинга. +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## Развертывание grafting субграфа Замененный subgraph.yaml будет иметь новый адрес контракта. Это может произойти, когда Вы обновите свое децентрализованное приложение, повторно развернете контракт и т. д. -1. Перейдите в [Subgraph Studio](https://thegraph.com/studio/) и создайте субграф в тестнете Sepolia с названием `graft-replacement` -2. Создайте новый манифест. `subgraph.yaml` для `graph-replacement` содержит другой адрес контракта и новую информацию о том, как он должен быть присоединен. Это `block` [последнего сгенерированного события], которое Вас интересует, вызванного старым контрактом, и `base` старого субграфа. Идентификатор субграфа `base` — это `Deployment ID` Вашего исходного субграфа `graph-example`. Вы можете найти его в Subgraph Studio. -3. Следуйте инструкциям в разделе `AUTH & DEPLOY` на странице своего субграфа в папке `graft-replacement` репозитория -4. После завершения убедитесь, что субграф правильно индексируется. Если Вы запустите следующую команду в The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. 
If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ graft: } ``` -Вы можете увидеть, что субграф `graft-replacement` индексирует данные как из старого субграфа `graph-example`, так и из новых данных из нового адреса контракта. Исходный контракт сгенерировал два события `Withdrawal`, [Событие 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) и [Событие 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). Новый контракт сгенерировал одно событие `Withdrawal` после этого, [Событие 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). Две ранее индексируемые транзакции (События 1 и 2) и новая транзакция (Событие 3) были объединены в субграфе `graft-replacement`. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Поздравляем! Вы успешно перенесли один субграф в другой. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. ## Дополнительные ресурсы From 7c7c8545f965f0d7d6022d25f0126bdfb30092d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:18 -0500 Subject: [PATCH 0617/1789] New translations grafting.mdx (Swedish) --- .../pages/sv/subgraphs/cookbook/grafting.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/sv/subgraphs/cookbook/grafting.mdx b/website/src/pages/sv/subgraphs/cookbook/grafting.mdx index e43fd73014c3..9229085f73df 100644 --- a/website/src/pages/sv/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: Byt ut ett kontrakt och behåll dess historia med ympning --- -I den här guiden kommer du att lära dig hur du bygger och distribuerar nya subgrafer genom att ympa befintliga subgrafer. +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## Vad är ympning? -Ympning återanvänder data från en befintlig subgraf och börjar indexera den vid ett senare block. Detta är användbart under utveckling för att snabbt komma förbi enkla fel i mappningarna eller för att tillfälligt få en befintlig subgraf att fungera igen efter att den har misslyckats. Det kan också användas när du lägger till en funktion till en subgraf som tar lång tid att indexera från början. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. 
-Den ympade subgrafen kan använda ett GraphQL-schema som inte är identiskt med det i bas subgrafen, utan bara är kompatibelt med det. Det måste vara ett giltigt subgraf schema i sig, men kan avvika från bas undergrafens schema på följande sätt: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - Den lägger till eller tar bort entitetstyper - Det tar bort attribut från entitetstyper @@ -22,35 +22,35 @@ För mer information kan du kontrollera: - [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Viktig anmärkning om ympning vid uppgradering till nätverket -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Varför är detta viktigt? -Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio. +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Bästa praxis -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. Genom att följa dessa riktlinjer minimerar du riskerna och säkerställer en smidigare migreringsprocess. ## Bygga en befintlig subgraf -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). 
To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## Definition av subgraf manifestet -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## Ympnings manifest Definition -Ympning kräver att två nya objekt läggs till i det ursprungliga subgraf manifestet: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## Distribuera Bas Subgraf -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. När du är klar kontrollerar du att subgrafen indexerar korrekt. Om du kör följande kommando i The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ Den returnerar ungefär så här: } ``` -När du har verifierat att subgrafen indexerar korrekt kan du snabbt uppdatera subgrafen med ympning. +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## Utplacering av ympning subgraf Transplantatersättningen subgraph.yaml kommer att ha en ny kontraktsadress. 
Detta kan hända när du uppdaterar din dapp, omdisponerar ett kontrakt, etc. -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. När du är klar kontrollerar du att subgrafen indexerar korrekt. Om du kör följande kommando i The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ Det bör returnera följande: } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Congrats! You have successfully grafted a subgraph onto another subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. 
## Ytterligare resurser From 0f94f1f25eb3742c7887cf3fa88e63da4112c52d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:19 -0500 Subject: [PATCH 0618/1789] New translations grafting.mdx (Turkish) --- .../pages/tr/subgraphs/cookbook/grafting.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/tr/subgraphs/cookbook/grafting.mdx b/website/src/pages/tr/subgraphs/cookbook/grafting.mdx index 60855aa97729..2ca402ebbadc 100644 --- a/website/src/pages/tr/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/tr/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: Bir Sözleşmeyi Değiştirin ve Graftlama ile Geçmişini Koruyun --- -Bu rehberde, mevcut subgraphları graftlayarak yeni subgraphları nasıl oluşturacağınızı ve dağıtacağınızı öğreneceksiniz. +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## Graftlama Nedir? -Graftlama, mevcut bir subgraph'daki verileri yeniden kullanır ve daha sonraki bir blokta indekslemeye başlar. Bu, geliştirme sırasında eşleştirmelerdeki basit hataları hızlı bir şekilde geçmek veya mevcut bir subgraph'ın başarısız olduktan sonra geçici olarak tekrar çalışmasını sağlamak için kullanışlıdır. Ayrıca, sıfırdan indekslenmesi uzun süren bir subgraph'a bir özellik eklerken de kullanılabilir. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -Graftlanan subgraph, temel subgraphla tamamen aynı olmayan, ancak onunla uyumlu olan bir GraphQL şeması kullanabilir. Kendi başına geçerli bir subgraph şeması olmalıdır, ancak şu şekillerde temel subgraph şemasından sapabilir: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - Varlık türlerini ekler veya kaldırır - Varlık türlerinden öznitelikleri kaldırır @@ -22,35 +22,35 @@ Daha fazla bilgi için kontrol edebilirsiniz: - [Aşılama](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -Bu eğitimde, temel bir kullanım senaryosunu ele alacağız. Mevcut bir sözleşmeyi özdeş bir sözleşme (aynı koda sahip ancak adresi farklı bir sözleşme) ile değiştireceğiz. Ardından, mevcut subgraph'i yeni sözleşmeyi izleyen "temel" subgraph'e aşılayacağız. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Ağa Yükseltme Durumunda Graftlamaya İlişkin Önemli Not -> **Dikkat**: Aşılamanın The Graph Ağına yayımlanan subgraph'ler için kullanılmaması önerilir +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Bu Neden Önemli? -Aşılama, bir subgraph'i diğerine "aşılayarak" mevcut subgraph'ten yeni bir versiyona tarihi verileri etkin bir şekilde aktarmanıza olanak tanıyan güçlü bir özelliktir. Bir subgraph'i The Graph Ağından Subgraph Studio'ya geri aşılamak mümkün değildir. 
+Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### En İyi Uygulamalar -**İlk Geçiş**: Subgraph'inizi ilk kez merkeziyetsiz ağa dağıttığınızda aşılama yapmayın. Subgraph'in stabil ve beklendiği gibi çalıştığından emin olun. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Sonraki Güncellemeler**: Subgraph'iniz merkeziyetsiz ağda canlı ve stabil olduğunda, gelecekteki versiyonlar için aşılama kullanarak geçişi daha sorunsuz hale getirebilir ve tarihi verileri koruyabilirsiniz. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. Bu yönergelere uyarak riskleri en aza indirebilir ve daha sorunsuz bir taşıma süreci geçirebilirsiniz. ## Mevcut Bir Subgraph'ı Oluşturma -Subgraph oluşturmak, The Graph'in önemli bir parçasıdır. Bu konu daha detaylı olarak [burada](/subgraphs/quick-start/) açıklanmıştır. Bu eğitimde kullanılan mevcut subgraph'i inşa etmek ve dağıtmak için aşağıdaki depo sağlanmıştır: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph örnek deposu](https://github.com/Shiyasmohd/grafting-tutorial) -> Not: Subgraph'te kullanılan sözleşme, [Hackathon Başlangıç Kiti](https://github.com/schmidsi/hackathon-starterkit)'nden alınmıştır. +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## Subgraph Manifest Tanımı -Subgraph manifestosu `subgraph.yaml`, subgraph için veri kaynaklarını, ilgili tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken fonksiyonları tanımlar. Kullanacağınız bir subgraph manifestosu örneği aşağıda verilmiştir: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## Graftlama Manifest Tanımı -Graftlama, orijinal subgraph bildirimine iki yeni öğe eklemeyi gerektirir: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - - grafting # özellik adı + - grafting # feature name graft: - base: Qm... # Asıl subgraph'in kimlik numarası - block: 5956000 # blok numarası + base: Qm... # Subgraph ID of base Subgraph + block: 5956000 # block number ``` - `features:` tüm kullanılan [özellik adlarının](/developing/creating-a-subgraph/#experimental-features) bir listesidir. -- `graft:` `base` subgraph ve üzerine bağlanılacak bloktan oluşan bir eşlemedir. `block`, endekslemenin başlanacağı blok numarasıdır. The Graph, belirtilen bloka kadar olan temel subgraph'in verisini kopyalayıp bu bloka kadar olan kısmı dahil edecek ve ardından yeni subgraph'i bu bloktan itibaren endekslemeye devam edecek. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. 
The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -`base` ve `block` değerleri, iki subgraph dağıtılarak bulunabilir: Biri temel endeksleme için, diğeri ise aşılama için olan subgraph +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## Temel Subgraph'ı Dağıtma -1. [Subgraph Studio](https://thegraph.com/studio/) adresine gidip Sepolia testnet üzerinde `graft-example` adlı bir subgraph oluşturun -2. Depodan `graft-example` klasöründeki `AUTH & DEPLOY` bölümündeki yönergeleri izleyin -3. Tamamlandığında, subgraph'ın doğru bir şekilde indekslendiğinden emin olun. Eğer aşağıdaki komutu Graph Test Alanında(Playground) çalıştırırsanız +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ graft: } ``` -Subgraph'ın düzgün bir şekilde indekslendiğini doğruladıktan sonra, subgraph'ı graftlama ile hızlı bir şekilde güncelleyebilirsiniz. +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## Graftlama Subgraph'ını Dağıtma Graft yerine geçen subgraph.yaml yeni bir sözleşme adresine sahip olacaktır. Bu, merkeziyetsiz uygulamanızı güncellediğinizde, bir sözleşmeyi yeniden dağıttığınızda vb. gerçekleşebilir. -1. [Subgraph Studio](https://thegraph.com/studio/) adresine gidin ve Sepolia testnet üzerinde `graft-replacement` adlı bir subgraph oluşturun -2. Yeni bir manifesto dosyası oluşturun. `graph-replacement` subgraph'ine ait `subgraph.yaml` dosyası, farklı bir sözleşme adresi ve nasıl aşılanması gerektiğiyle ilgili yeni bilgiler içermektedir. Bunlar, eski sözleşme tarafından ilgilendiğiniz [son yayılan olayın](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) `blok`u ve eski subgraph'in `base`'idir (temelidir). `base` subgraph kimliği, orijinal `graph-example` subgraph'inizin `Deployment ID`'sidir (dağıtım kimliğidir). Bunu Subgraph Studio'da bulabilirsiniz. -3. `graft-replacement` klasöründeki subgraph sayfanızda, `AUTH & DEPLOY` bölümündeki talimatları izleyin -4. Tamamlandığında, subgraph'ın doğru bir şekilde indekslendiğinden emin olun. Eğer aşağıdaki komutu Graph Test Alanında(Playground) çalıştırırsanız +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. 
If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ Aşağıdakileri döndürmelidir: } ``` -`graft-replacement` subgraph'inin eski `graph-example` verilerini ve yeni sözleşme adresinden gelen yeni verileri endekslediğini görebilirsiniz. Orijinal sözleşme, iki `Withdrawal` olayı yaydı: [Olay 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) ve [Olay 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). Yeni sözleşme, sonrasında bir `Withdrawal` olayı yaydı, [Olay 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). Önceden endekslenmiş iki işlem (Olay 1 ve 2) ve yeni işlem (Olay 3), `graft-replacement` subgraph'inde birleştirildi. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Tebrikler! Bir subgraph'i başka bir subgraph'e başarıyla aşıladınız. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. ## Ek Kaynaklar From df88ecb7aa9bee3c37132de1d6594aa0e9664f70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:20 -0500 Subject: [PATCH 0619/1789] New translations grafting.mdx (Ukrainian) --- .../pages/uk/subgraphs/cookbook/grafting.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/uk/subgraphs/cookbook/grafting.mdx b/website/src/pages/uk/subgraphs/cookbook/grafting.mdx index 5455042183df..1703beba90ac 100644 --- a/website/src/pages/uk/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: Замініть контракт та збережіть його історію за допомогою графтингу --- -У цьому гайді ви дізнаєтеся, як створювати та розгортати нові підграфи шляхом поєднання наявних підграфів. +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## Що таке Grafting? -При цьому процесі повторно використовуються дані з наявного підграфа і починається їх індексування з наступного блоку. Це корисно під час розробки для швидкого усунення простих помилок у схемах або для тимчасового відновлення працездатності наявного підграфа після його збою. Також його можна використовувати при додаванні об'єкта до підграфа, індексація якого з нуля займає багато часу. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -Підграф, утворений в результаті може використовувати схему GraphQL, яка не є ідентичною схемі базового підграфа, а лише сумісною з нею. 
Вона повинна бути валідною схемою підграфа сама по собі, але може відхилятися від схеми базового підграфа у такому випадку: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - Додає або видаляє типи елементів - Видаляє атрибути з типів елементів @@ -22,35 +22,35 @@ title: Замініть контракт та збережіть його іст - [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Важливе зауваження щодо графтингу при оновленні в мережі -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Чому це так важливо? -Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio. +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Найкращі практики -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. Дотримуючись цих рекомендацій, ви мінімізуєте ризики та забезпечите безперешкодний процес міграції. ## Побудова наявного підграфа -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). 
+> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## Визначення маніфесту підграфів -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## Визначення Grafting Manifest -Графтинг вимагає додавання двох нових елементів до оригінального маніфесту підграфів: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## Розгортання базового підграфа -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. Закінчивши, перевірте, чи правильно індексується підграф. Ви можете зробити це запустивши наступну команду у вікні The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ The `base` and `block` values can be found by deploying two subgraphs: one for t } ``` -Після того, як ви переконалися, що підграф індексується належним чином, ви можете швидко оновити його за допомогою графтингу. +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## Розгортання підграфів для графтингу При цьому процесі підрозділ subgraph.yaml матиме нову адресу контракту. Це може статися, коли ви оновлюєте децентралізований додаток, перерозподіляєте контракт тощо. -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. 
The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. Закінчивши, перевірте, чи правильно індексується підграф. Ви можете зробити це запустивши наступну команду у вікні The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ The `base` and `block` values can be found by deploying two subgraphs: one for t } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Congrats! You have successfully grafted a subgraph onto another subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. 
## Додаткові матеріали From 70bd83619720038ad9038b8115c0cd4d64c7967e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:21 -0500 Subject: [PATCH 0620/1789] New translations grafting.mdx (Chinese Simplified) --- .../pages/zh/subgraphs/cookbook/grafting.mdx | 64 +++++++++---------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/grafting.mdx b/website/src/pages/zh/subgraphs/cookbook/grafting.mdx index 321b5b115bec..efc6a118918c 100644 --- a/website/src/pages/zh/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/grafting.mdx @@ -2,55 +2,55 @@ title: 用嫁接替换合约并保持合约的历史 --- -在本指南中,您将学习如何通过嫁接现有的子图来构建和部署新的子图。 +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## 什么是嫁接? -嫁接重用现有子图中的数据,并开始在稍后的区块中对其进行索引。这在开发过程中非常有用,可以快速克服映射中的简单错误,或者在现有子图失败后暂时使其重新工作。此外,当向子图添加特性时可以使用它,因为从头开始索引需要很长时间。 +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -嫁接子图可以使用一个 GraphQL 模式 schema,该模式与基子图之一不同,但仅与基子图兼容。它本身必须是一个有效的子图模式,但是可以通过以下方式偏离基子图的模式: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - 它添加或删除实体类型 -- 它从实体类型中删除属性 -- 它将可为空的属性添加到实体类型 -- 它将不可为空的属性转换为可空的属性 -- 它将值添加到枚举类型中 -- 它添加或删除接口 -- 它改变了实现接口的实体类型 +- 从实体类型中删除属性 +- 将可为空的属性添加到实体类型 +- 将不可为空的属性转换为可空的属性 +- 将值添加到枚举类型中 +- 添加或删除接口 +- 改变了实现接口的实体类型 有关详情,请参阅: - [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Important Note on Grafting When Upgrading to the Network -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Why Is This Important? -Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio. +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Best Practices -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. 
+**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. By adhering to these guidelines, you minimize risks and ensure a smoother migration process. ## Building an Existing Subgraph -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## 子图清单定义 -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## 嫁接清单定义 -嫁接需要在原始子图清单中添加两个新项: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## 部署基子图 -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. 
完成后,验证子图是否正确索引。如果在Graph Playground中运行下列指令。 +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ It returns something like this: } ``` -一旦您验证了子图是正确的索引,您可以快速更新子图与嫁接。 +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## 部署嫁接子图 嫁接替代Subgraph.yaml将获得一个新的合约地址。这可能发生在更新dapp、重新部署合约等时。 -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. 完成后,验证子图是否正确索引。如果在Graph Playground中运行下列指令。 +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -185,11 +185,11 @@ It returns something like this: } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. 
The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Congrats! You have successfully grafted a subgraph onto another subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. -## 其他资源 +## Additional Resources If you want more experience with grafting, here are a few examples for popular contracts: From 79032a067f265c711a8f2a22e26e497fb59f31f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:22 -0500 Subject: [PATCH 0621/1789] New translations grafting.mdx (Urdu (Pakistan)) --- .../pages/ur/subgraphs/cookbook/grafting.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/ur/subgraphs/cookbook/grafting.mdx b/website/src/pages/ur/subgraphs/cookbook/grafting.mdx index 6cda9bfe1f6e..cfd5486cbaab 100644 --- a/website/src/pages/ur/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: ایک کنٹریکٹ کو تبدیل کریں اور اس کی تاریخ کو گرافٹنگ کے ساتھ رکھیں --- -اس گائیڈ میں، آپ سیکھیں گے کہ موجودہ سب گراف کو گرافٹنگ کرکے نئے سب گراف کیسے بنائے اور ان کو تعینات کریں. +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## گرافٹنگ کیا ہے؟ -گرافٹنگ موجودہ سب گراف سے ڈیٹا کو دوبارہ استعمال کرتا ہے اور اسے بعد کے بلاک میں انڈیکس کرنا شروع کرتا ہے۔ میپنگ میں ماضی کی سادہ غلطیوں کو تیزی سے حاصل کرنے یا کسی موجودہ سب گراف کے ناکام ہونے کے بعد اسے عارضی طور پر دوبارہ کام کرنے کے لیے یہ ترقی کے دوران مفید ہے۔ نیز، اس کا استعمال کسی سب گراف میں فیچر شامل کرتے وقت کیا جا سکتا ہے جو شروع سے انڈیکس میں زیادہ وقت لیتا ہے. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -گرافٹڈ سب گراف ایک گراف کیو ایل اسکیما استعمال کرسکتا ہے جو بیس سب گراف میں سے ایک سے مماثل نہیں ہے، لیکن اس کے ساتھ محض مطابقت رکھتا ہے۔ اسے اپنے طور پر ایک درست سب گراف سکیما ہونا چاہیے، لیکن درج ذیل طریقوں سے بنیادی سب گراف کے سکیما سے انحراف ہو سکتا ہے: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - یہ ہستی کی اقسام کو جوڑتا یا ہٹاتا ہے - یہ ہستی کی اقسام سے صفات کو ہٹاتا ہے @@ -22,35 +22,35 @@ title: ایک کنٹریکٹ کو تبدیل کریں اور اس کی تاری - [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. 
+In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## نیٹ ورک میں اپ گریڈ کرتے وقت گرافٹنگ پر اہم نوٹ -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### یہ کیوں اہم ہے؟ -Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio. +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### بہترین طریقے -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. ان رہنما خطوط پر عمل پیرا ہو کر، آپ خطرات کو کم کرتے ہیں اور منتقلی کے ایک ہموار عمل کو یقینی بناتے ہیں. ## ایک موجودہ سب گراف بنانا -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## سب گراف مینی فیسٹ کی تعریف -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. 
See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## گرافٹنگ مینی فیسٹ کی تعریف -گرافٹنگ کے لیے اصل سب گراف مینی فیسٹ میں دو نئے آئٹمز شامل کرنے کی ضرورت ہے: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## بیس سب گراف تعینات کرنا -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. ایک دفعہ ختم ہو جاۓ، تصدیق کریں کے سب گراف صحیح سے انڈیکس ہو رہا ہے. اگر آپ درج کمانڈ گراف پلے گراونڈ میں چلائیں +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ The `base` and `block` values can be found by deploying two subgraphs: one for t } ``` -ایک بار آپ نے تصدیق کر لی کے سب گراف صحیح سے انڈیکس ہو رہا، آپ جلدی سے گرافٹنگ سے اسے اپڈیٹ کر سکتے ہیں. +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## گرافٹنگ سب گراف کو تعینات کرنا گرافٹ متبادل subgraph.yaml کے پاس ایک نیا کنٹریکٹ ایڈریس ہوگا۔ یہ اس وقت ہو سکتا ہے جب آپ اپنے ڈیپ کو اپ ڈیٹ کرتے ہیں، کسی کنٹریکٹ کو دوبارہ استعمال کرتے ہیں، وغیر. -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. ایک دفعہ ختم ہو جاۓ، تصدیق کریں کے سب گراف صحیح سے انڈیکس ہو رہا ہے. 
اگر آپ درج کمانڈ گراف پلے گراونڈ میں چلائیں +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ The `base` and `block` values can be found by deploying two subgraphs: one for t } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Congrats! You have successfully grafted a subgraph onto another subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. ## اضافی وسائل From e36345be6abf02b8f0cff202e903cefec27858af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:23 -0500 Subject: [PATCH 0622/1789] New translations grafting.mdx (Vietnamese) --- .../pages/vi/subgraphs/cookbook/grafting.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/vi/subgraphs/cookbook/grafting.mdx b/website/src/pages/vi/subgraphs/cookbook/grafting.mdx index 2d9b2a16a1ef..ba2b97ed67c2 100644 --- a/website/src/pages/vi/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: Replace a Contract and Keep its History With Grafting --- -In this guide, you will learn how to build and deploy new subgraphs by grafting existing subgraphs. 
+In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## What is Grafting? -Grafting reuses the data from an existing subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. Also, it can be used when adding a feature to a subgraph that takes long to index from scratch. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - It adds or removes entity types - It removes attributes from entity types @@ -22,35 +22,35 @@ For more information, you can check: - [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Important Note on Grafting When Upgrading to the Network -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Why Is This Important? -Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio. +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Best Practices -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. 
+**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. By adhering to these guidelines, you minimize risks and ensure a smoother migration process. ## Building an Existing Subgraph -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## Subgraph Manifest Definition -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## Grafting Manifest Definition -Grafting requires adding two new items to the original subgraph manifest: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## Deploying the Base Subgraph -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. 
Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ It returns something like this: } ``` -Once you have verified the subgraph is indexing properly, you can quickly update the subgraph with grafting. +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## Deploying the Grafting Subgraph The graft replacement subgraph.yaml will have a new contract address. This could happen when you update your dapp, redeploy a contract, etc. -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ It should return the following: } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. 
The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Congrats! You have successfully grafted a subgraph onto another subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. ## Additional Resources From edbd00ba3e58a704cff9923a07c7b4bf8a8ca270 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:24 -0500 Subject: [PATCH 0623/1789] New translations grafting.mdx (Marathi) --- .../pages/mr/subgraphs/cookbook/grafting.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/mr/subgraphs/cookbook/grafting.mdx b/website/src/pages/mr/subgraphs/cookbook/grafting.mdx index 3ceb7d2c7901..2a9f80cc0027 100644 --- a/website/src/pages/mr/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: करार बदला आणि त्याचा इतिहास ग्राफ्टिंगसह ठेवा --- -या मार्गदर्शकामध्ये, तुम्ही विद्यमान सबग्राफ्सचे ग्राफ्टिंग करून नवीन सबग्राफ कसे तयार करावे आणि कसे तैनात करावे ते शिकाल. +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## ग्राफ्टिंग म्हणजे काय? -ग्राफ्टिंग विद्यमान सबग्राफमधील डेटा पुन्हा वापरते आणि नंतरच्या ब्लॉकमध्ये अनुक्रमित करणे सुरू करते. मॅपिंगमध्ये भूतकाळातील साध्या चुका लवकर मिळवण्यासाठी किंवा विद्यमान सबग्राफ अयशस्वी झाल्यानंतर तात्पुरते काम करण्यासाठी हे विकासादरम्यान उपयुक्त आहे. तसेच, स्क्रॅचपासून इंडेक्स होण्यास बराच वेळ घेणार्‍या सबग्राफमध्ये वैशिष्ट्य जोडताना ते वापरले जाऊ शकते. +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -ग्राफ्टेड सबग्राफ GraphQL स्कीमा वापरू शकतो जो बेस सबग्राफपैकी एकाशी एकसारखा नसतो, परंतु त्याच्याशी फक्त सुसंगत असतो. ती स्वतःच्या अधिकारात वैध सबग्राफ स्कीमा असणे आवश्यक आहे, परंतु खालील प्रकारे बेस सबग्राफच्या स्कीमापासून विचलित होऊ शकते: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - हे घटक प्रकार जोडते किंवा काढून टाकते - हे घटक प्रकारातील गुणधर्म काढून टाकते @@ -22,35 +22,35 @@ title: करार बदला आणि त्याचा इतिहास - [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). 
Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Important Note on Grafting When Upgrading to the Network -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Why Is This Important? -Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio. +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Best Practices -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. By adhering to these guidelines, you minimize risks and ensure a smoother migration process. ## विद्यमान सबग्राफ तयार करणे -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## सबग्राफ मॅनिफेस्ट व्याख्या -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## Grafting मॅनिफेस्ट व्याख्या -ग्राफ्टिंगसाठी मूळ सबग्राफ मॅनिफेस्टमध्ये दोन नवीन आयटम जोडणे आवश्यक आहे: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... 
# Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## बेस सबग्राफ तैनात करणे -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. एकदा पूर्ण झाल्यावर, सबग्राफ योग्यरित्या अनुक्रमित होत असल्याचे सत्यापित करा. जर तुम्ही ग्राफ प्लेग्राउंडमध्ये खालील आदेश चालवलात +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ The `base` and `block` values can be found by deploying two subgraphs: one for t } ``` -एकदा तुम्ही सबग्राफ व्यवस्थित इंडेक्स करत असल्याची पडताळणी केल्यानंतर, तुम्ही ग्राफ्टिंगसह सबग्राफ त्वरीत अपडेट करू शकता. +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## ग्राफ्टिंग सबग्राफ तैनात करणे कलम बदली subgraph.yaml मध्ये नवीन करार पत्ता असेल. जेव्हा तुम्ही तुमचा dapp अपडेट करता, करार पुन्हा लागू करता तेव्हा असे होऊ शकते. -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. एकदा पूर्ण झाल्यावर, सबग्राफ योग्यरित्या अनुक्रमित होत असल्याचे सत्यापित करा. जर तुम्ही ग्राफ प्लेग्राउंडमध्ये खालील आदेश चालवलात +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. 
These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ It should return the following: } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Congrats! You have successfully grafted a subgraph onto another subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. ## अतिरिक्त संसाधने From 9ce504404b8677eb302ce2efda0ea6a6a650c4ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:26 -0500 Subject: [PATCH 0624/1789] New translations grafting.mdx (Hindi) --- .../pages/hi/subgraphs/cookbook/grafting.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/hi/subgraphs/cookbook/grafting.mdx b/website/src/pages/hi/subgraphs/cookbook/grafting.mdx index c0703bcfb101..2a5168e67ad4 100644 --- a/website/src/pages/hi/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/hi/subgraphs/cookbook/grafting.mdx @@ -2,13 +2,13 @@ title: एक कॉन्ट्रैक्ट बदलें और उसका इतिहास ग्राफ्टिंग के साथ रखें --- -इस गाइड में, आप सीखेंगे कि मौजूदा सबग्राफ को ग्राफ्ट करके नए सबग्राफ कैसे बनाएं और तैनात करें। +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. ## ग्राफ्टिंग क्या है? 
-ग्राफ्टिंग एक वर्तमान सब-ग्राफ के डाटा का दोबारा इस्तेमाल करता है और उसे बाद के ब्लॉक्स में इंडेक्स करना चालू कर देता है| यह विकास की प्रक्रिया में उपयोगी है क्यूंकि इसकी वजह से मैप्पिंग्स में छोटी-मोटी त्रुटियों से छुटकारा पाया जा सकता है या फिर एक मौजूदा सब-ग्राफ को विफल होने के बाद दोबारा चालू किया जा सकता है| साथ हीं, इसका इस्तेमाल ऐसे सब-ग्राफ में कोई खूबी जोड़ते वक़्त भी किया जा सकता है जिसमे शुरुआत से इंडेक्स करने में काफी लम्बा वक़्त लगता हो| +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. -ग्राफ्टेड सबग्राफ एक ग्राफक्यूएल स्कीमा का उपयोग कर सकता है जो बेस सबग्राफ के समान नहीं है, लेकिन इसके अनुकूल हो। यह अपने आप में एक मान्य सबग्राफ स्कीमा होना चाहिए, लेकिन निम्नलिखित तरीकों से बेस सबग्राफ के स्कीमा से विचलित हो सकता है: +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: - यह इकाई के प्रकारों को जोड़ या हटा सकता है| - यह इकाई प्रकारों में से गुणों को हटाता है| @@ -22,35 +22,35 @@ title: एक कॉन्ट्रैक्ट बदलें और उसक - [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -इस ट्यूटोरियल में, हम एक बुनियादी उपयोग मामले को कवर करेंगे। हम एक मौजूदा कॉन्ट्रैक्ट को एक समान कॉन्ट्रैक्ट (नए पते के साथ, लेकिन वही कोड) से बदलेंगे। फिर, मौजूदा Subgraph को "बेस" Subgraph पर जोड़ेंगे, जो नए कॉन्ट्रैक्ट को ट्रैक करता है। +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. ## Important Note on Grafting When Upgrading to the Network -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network ### Why Is This Important? -Grafting एक शक्तिशाली विशेषता है जो आपको एक सबग्राफ़ को दूसरे पर "graft" करने की अनुमति देती है, जिससे मौजूदा सबग्राफ़ से नए संस्करण में ऐतिहासिक डेटा को प्रभावी ढंग से स्थानांतरित किया जा सके।The Graph Network से सबग्राफ़ को Subgraph Studioमें वापस graft करना संभव नहीं है। +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. ### Best Practices -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. Ensure that the Subgraph is stable and functioning as expected. -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. 
+**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. By adhering to these guidelines, you minimize risks and ensure a smoother migration process. ## एक मौजूदा सब-ग्राफ बनाना -Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: - [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## सब ग्राफ मैनिफेस्ट की परिभाषा -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -85,27 +85,27 @@ dataSources: ## ग्राफ्टिंग मैनिफेस्ट की परिभाषा -ग्राफ्टिंग करने के लिए मूल सब-ग्राफ मैनिफेस्ट में 2 नई चीज़ें जोड़ने की आवश्यकता है: +Grafting requires adding two new items to the original Subgraph manifest: ```yaml --- features: - grafting # feature name graft: - base: Qm... # subgraph ID of base subgraph + base: Qm... # Subgraph ID of base Subgraph block: 5956000 # block number ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting ## बेस सब-ग्राफ को तैनात करना -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. एक बार पूरा होने पर, सत्यापित करें की इंडेक्सिंग सही ढंग से हो गयी है| यदि आप निम्न कमांड ग्राफ प्लेग्राउंड में चलाते हैं +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. 
Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -138,16 +138,16 @@ The `base` and `block` values can be found by deploying two subgraphs: one for t } ``` -एक बार आपका सत्यापित सब-ग्राफ ढंग से इंडेक्स हो जाता है तो आप बिना किसी देरी के अपना सब-ग्राफ को ग्राफ्टिंग से अपडेट कर सकते हैं| +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. ## ग्राफ्टिंग सब-ग्राफ तैनात करना ग्राफ्ट प्रतिस्तापित subgraph.yaml में एक नया कॉन्ट्रैक्ट एड्रेस होगा| यह तब हो सकता है जब आप अपना डैप अपडेट करें, कॉन्ट्रैक्ट को दोबारा तैनात करें, इत्यादि| -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. एक बार पूरा होने पर, सत्यापित करें की इंडेक्सिंग सही ढंग से हो गयी है| यदि आप निम्न कमांड ग्राफ प्लेग्राउंड में चलाते हैं +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground ```graphql { @@ -185,9 +185,9 @@ The `base` and `block` values can be found by deploying two subgraphs: one for t } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. 
The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. -Congrats! You have successfully grafted a subgraph onto another subgraph. +Congrats! You have successfully grafted a Subgraph onto another Subgraph. ## Additional Resources From 15276732f75acbd1ffbeac1bd2ed25eb3b1ecf14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:27 -0500 Subject: [PATCH 0625/1789] New translations grafting.mdx (Swahili) --- .../pages/sw/subgraphs/cookbook/grafting.mdx | 202 ++++++++++++++++++ 1 file changed, 202 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/cookbook/grafting.mdx diff --git a/website/src/pages/sw/subgraphs/cookbook/grafting.mdx b/website/src/pages/sw/subgraphs/cookbook/grafting.mdx new file mode 100644 index 000000000000..0a0d7b3bb370 --- /dev/null +++ b/website/src/pages/sw/subgraphs/cookbook/grafting.mdx @@ -0,0 +1,202 @@ +--- +title: Replace a Contract and Keep its History With Grafting +--- + +In this guide, you will learn how to build and deploy new Subgraphs by grafting existing Subgraphs. + +## What is Grafting? + +Grafting reuses the data from an existing Subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing Subgraph working again after it has failed. Also, it can be used when adding a feature to a Subgraph that takes long to index from scratch. + +The grafted Subgraph can use a GraphQL schema that is not identical to the one of the base Subgraph, but merely compatible with it. It has to be a valid Subgraph schema in its own right, but may deviate from the base Subgraph's schema in the following ways: + +- It adds or removes entity types +- It removes attributes from entity types +- It adds nullable attributes to entity types +- It turns non-nullable attributes into nullable attributes +- It adds values to enums +- It adds or removes interfaces +- It changes for which entity types an interface is implemented + +For more information, you can check: + +- [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) + +In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing Subgraph onto the "base" Subgraph that tracks the new contract. + +## Important Note on Grafting When Upgrading to the Network + +> **Caution**: It is recommended to not use grafting for Subgraphs published to The Graph Network + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one Subgraph onto another, effectively transferring historical data from the existing Subgraph to a new version. It is not possible to graft a Subgraph from The Graph Network back to Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your Subgraph to the decentralized network, do so without grafting. 
Ensure that the Subgraph is stable and functioning as expected. + +**Subsequent Updates**: once your Subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. + +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. + +## Building an Existing Subgraph + +Building Subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing Subgraph used in this tutorial, the following repo is provided: + +- [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) + +> Note: The contract used in the Subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). + +## Subgraph Manifest Definition + +The Subgraph manifest `subgraph.yaml` identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest that you will use: + +```yaml +specVersion: 0.0.4 +schema: + file: ./schema.graphql +dataSources: + - kind: ethereum + name: Lock + network: sepolia + source: + address: '0xb3aabe721794b85fe4e72134795c2f93b4eb7e63' + abi: Lock + startBlock: 5955690 + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts +``` + +- The `Lock` data source is the abi and contract address we will get when we compile and deploy the contract +- The network should correspond to an indexed network being queried. Since we're running on Sepolia testnet, the network is `sepolia` +- The `mapping` section defines the triggers of interest and the functions that should be run in response to those triggers. In this case, we are listening for the `Withdrawal` event and calling the `handleWithdrawal` function when it is emitted. + +## Grafting Manifest Definition + +Grafting requires adding two new items to the original Subgraph manifest: + +```yaml +--- +features: + - grafting # feature name +graft: + base: Qm... # Subgraph ID of base Subgraph + block: 5956000 # block number +``` + +- `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). +- `graft:` is a map of the `base` Subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base Subgraph up to and including the given block and then continue indexing the new Subgraph from that block on. + +The `base` and `block` values can be found by deploying two Subgraphs: one for the base indexing and one with grafting + +## Deploying the Base Subgraph + +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the Subgraph is indexing properly. 
If you run the following command in The Graph Playground + +```graphql +{ + withdrawals(first: 5) { + id + amount + when + } +} +``` + +It returns something like this: + +``` +{ + "data": { + "withdrawals": [ + { + "id": "0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d0a000000", + "amount": "0", + "when": "1716394824" + }, + { + "id": "0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc45203000000", + "amount": "0", + "when": "1716394848" + } + ] + } +} +``` + +Once you have verified the Subgraph is indexing properly, you can quickly update the Subgraph with grafting. + +## Deploying the Grafting Subgraph + +The graft replacement subgraph.yaml will have a new contract address. This could happen when you update your dapp, redeploy a contract, etc. + +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a Subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old Subgraph. The `base` Subgraph ID is the `Deployment ID` of your original `graph-example` Subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your Subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the Subgraph is indexing properly. If you run the following command in The Graph Playground + +```graphql +{ + withdrawals(first: 5) { + id + amount + when + } +} +``` + +It should return the following: + +``` +{ + "data": { + "withdrawals": [ + { + "id": "0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d0a000000", + "amount": "0", + "when": "1716394824" + }, + { + "id": "0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc45203000000", + "amount": "0", + "when": "1716394848" + }, + { + "id": "0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af06000000", + "amount": "0", + "when": "1716429732" + } + ] + } +} +``` + +You can see that the `graft-replacement` Subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` Subgraph. + +Congrats! You have successfully grafted a Subgraph onto another Subgraph. 
+ +## Additional Resources + +If you want more experience with grafting, here are a few examples for popular contracts: + +- [Curve](https://github.com/messari/subgraphs/blob/master/subgraphs/curve-finance/protocols/curve-finance/config/templates/curve.template.yaml) +- [ERC-721](https://github.com/messari/subgraphs/blob/master/subgraphs/erc721-metadata/subgraph.yaml) +- [Uniswap](https://github.com/messari/subgraphs/blob/master/subgraphs/uniswap-v3-forks/protocols/uniswap-v3/config/templates/uniswapV3Template.yaml), + +To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. Alternatives like [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates) can achieve similar results + +> Note: A lot of material from this article was taken from the previously published [Arweave article](/subgraphs/cookbook/arweave/) From 8f917e7b362d1fcfb5759fb87aaefa76690d3a64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:28 -0500 Subject: [PATCH 0626/1789] New translations near.mdx (Romanian) --- .../src/pages/ro/subgraphs/cookbook/near.mdx | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/ro/subgraphs/cookbook/near.mdx b/website/src/pages/ro/subgraphs/cookbook/near.mdx index 6060eb27e761..698a0ac3486c 100644 --- a/website/src/pages/ro/subgraphs/cookbook/near.mdx +++ b/website/src/pages/ro/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: Building Subgraphs on NEAR --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## What is NEAR? [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## What are NEAR subgraphs? +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR Subgraphs: - Block handlers: these are run on every new block - Receipt handlers: run every time a message is executed at a specified account @@ -23,32 +23,32 @@ Subgraphs are event-based, which means that they listen for and then process onc ## Building a NEAR Subgraph -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. 
-`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> Building a NEAR subgraph is very similar to building a subgraph that indexes Ethereum. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -There are three aspects of subgraph definition: +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. -During subgraph development there are two key commands: +During Subgraph development there are two key commands: ```bash $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### Subgraph Manifest Definition -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. 
At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. @@ -92,7 +92,7 @@ NEAR data sources support two types of handlers: ### Schema Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript Mappings @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## Deploying a NEAR Subgraph -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". 
-Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -The node configuration will depend on where the subgraph is being deployed. +The node configuration will depend on where the Subgraph is being deployed. ### Subgraph Studio @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Once your subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the subgraph itself: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ We will provide more information on running the above components soon. ## Querying a NEAR Subgraph -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Example Subgraphs -Here are some example subgraphs for reference: +Here are some example Subgraphs for reference: [NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -242,13 +242,13 @@ Here are some example subgraphs for reference: ### How does the beta work? -NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR subgraphs, and keep you up to date on the latest developments! +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! -### Can a subgraph index both NEAR and EVM chains? +### Can a Subgraph index both NEAR and EVM chains? -No, a subgraph can only support data sources from one chain/network. +No, a Subgraph can only support data sources from one chain/network. -### Can subgraphs react to more specific triggers? +### Can Subgraphs react to more specific triggers? Currently, only Block and Receipt triggers are supported. We are investigating triggers for function calls to a specified account. We are also interested in supporting event triggers, once NEAR has native event support. @@ -262,21 +262,21 @@ accounts: - mintbase1.near ``` -### Can NEAR subgraphs make view calls to NEAR accounts during mappings? 
+### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? This is not supported. We are evaluating whether this functionality is required for indexing. -### Can I use data source templates in my NEAR subgraph? +### Can I use data source templates in my NEAR Subgraph? This is not currently supported. We are evaluating whether this functionality is required for indexing. -### Ethereum subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR subgraph? +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? -Pending functionality is not yet supported for NEAR subgraphs. In the interim, you can deploy a new version to a different "named" subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" subgraph, which will use the same underlying deployment ID, so the main subgraph will be instantly synced. +Pending functionality is not yet supported for NEAR Subgraphs. In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. -### My question hasn't been answered, where can I get more help building NEAR subgraphs? +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## References From e6888b12e068e92a3f18cf38d539555767e6c063 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:29 -0500 Subject: [PATCH 0627/1789] New translations near.mdx (French) --- .../src/pages/fr/subgraphs/cookbook/near.mdx | 128 +++++++++--------- 1 file changed, 64 insertions(+), 64 deletions(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/near.mdx b/website/src/pages/fr/subgraphs/cookbook/near.mdx index 0e6830668726..ded110a533fe 100644 --- a/website/src/pages/fr/subgraphs/cookbook/near.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: Construction de subgraphs sur NEAR --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## Que signifie NEAR ? [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## Que sont les subgraphs NEAR ? +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. 
[Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR Subgraphs: - Gestionnaires de blocs : ceux-ci sont exécutés à chaque nouveau bloc - Gestionnaires de reçus : exécutés à chaque fois qu'un message est exécuté sur un compte spécifié @@ -23,66 +23,66 @@ Subgraphs are event-based, which means that they listen for and then process onc ## Construction d'un subgraph NEAR -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> La construction d'un subgraph NEAR est très similaire à la construction d'un subgraph qui indexe Ethereum. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -La définition d'un subgraph comporte trois aspects : +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. 
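As a rough illustration of what such a mapping can look like, the sketch below parses JSON logs inside a receipt handler. It is not part of the official example; the handler name `handleReceipt` matches the manifest sample further down, and writing entities is omitted because the schema is application-specific:

```typescript
import { near, json, log } from "@graphprotocol/graph-ts"

export function handleReceipt(receiptWithOutcome: near.ReceiptWithOutcome): void {
  const receipt = receiptWithOutcome.receipt
  const logs = receiptWithOutcome.outcome.logs

  log.info("Receipt for account {} at block {}", [
    receipt.receiverId,
    receiptWithOutcome.block.header.height.toString(),
  ])

  // NEAR contracts frequently emit stringified JSON in their execution logs
  for (let i = 0; i < logs.length; i++) {
    const parsed = json.try_fromString(logs[i])
    if (parsed.isOk) {
      // parsed.value is a JSONValue that can be mapped onto entities from schema.graphql
      log.debug("Parsed JSON log: {}", [logs[i]])
    }
  }
}
```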
-Lors du développement du subgraph, il y a deux commandes clés : +During Subgraph development there are two key commands: ```bash -$ graph codegen # génère des types à partir du fichier de schéma identifié dans le manifeste -$ graph build # génère le Web Assembly à partir des fichiers AssemblyScript, et prépare tous les fichiers de subgraphes dans un dossier /build +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### Définition du manifeste du subgraph -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml -specVersion: 0.0.2 -schema: - file: ./src/schema.graphql # lien vers le fichier de schéma -dataSources: - - kind: near - network: near-mainnet - source: - account: app.good-morning.near # Cette source de données surveillera ce compte - startBlock: 10662188 # Requis pour NEAR - mapping: - apiVersion: 0.0.5 - language: wasm/assemblyscript - blockHandlers: - - handler: handleNewBlock # le nom de la fonction dans le fichier de mapping - receiptHandlers: - - handler: handleReceipt # le nom de la fonction dans le fichier de mappage - file: ./src/mapping.ts # lien vers le fichier contenant les mappings Assemblyscript +specVersion : 0.0.2 +schema : + file : ./src/schema.graphql # lien vers le fichier de schéma +dataSources : + - kind : near + network : near-mainnet + source : + account : app.good-morning.near # Cette source de données surveillera ce compte + startBlock : 10662188 # Requis pour NEAR + mapping : + apiVersion : 0.0.5 + language : wasm/assemblyscript + blockHandlers : + - handler : handleNewBlock # le nom de la fonction dans le fichier de mapping + receiptHandlers : + - handler : handleReceipt # le nom de la fonction dans le fichier de mappage + file : ./src/mapping.ts # lien vers le fichier contenant les mappings Assemblyscript ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. 
```yaml comptes: - préfixes: - - application - - bien - suffixes: - - matin.près - - matin.testnet + préfixes : + - application + - bien + suffixes : + - matin.près + - matin.testnet ``` Les fichiers de données NEAR prennent en charge deux types de gestionnaires : @@ -92,11 +92,11 @@ Les fichiers de données NEAR prennent en charge deux types de gestionnaires : ### Définition de schéma -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### Cartographies AssemblyScript -The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). +Les gestionnaires d'événements sont écrits en [AssemblyScript](https://www.assemblyscript.org/). NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## Déploiement d'un subgraph NEAR -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio et l'Indexeur de mise à niveau sur The Graph Network prennent en charge actuellement l'indexation du mainnet et du testnet NEAR en bêta, avec les noms de réseau suivants : - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". 
-Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -La configuration du nœud dépend de l'endroit où le subgraph est déployé. +The node configuration will depend on where the Subgraph is being deployed. ### Subgraph Studio @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Une fois que votre subgraph a été déployé, il sera indexé par le nœud The Graph. Vous pouvez vérifier sa progression en interrogeant le subgraph lui-même : +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ Nous fournirons bientôt plus d'informations sur l'utilisation des composants ci ## Interrogation d'un subgraph NEAR -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Exemples de subgraphs -Voici quelques exemples de subgraphs pour référence : +Here are some example Subgraphs for reference: [NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -242,13 +242,13 @@ Voici quelques exemples de subgraphs pour référence : ### Comment fonctionne la bêta ? -Le support de NEAR est en version bêta, ce qui signifie qu'il peut y avoir des changements dans l'API alors que nous continuons à travailler sur l'amélioration de l'intégration. Veuillez envoyer un e-mail à near@thegraph.com pour que nous puissions vous aider à construire des subgraphs NEAR et vous tenir au courant des derniers développements ! +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! -### Un subgraph peut-il indexer à la fois les chaînes NEAR et EVM ? +### Can a Subgraph index both NEAR and EVM chains? -Non, un subgraph ne peut supporter que les sources de données d'une seule chaîne/réseau. +No, a Subgraph can only support data sources from one chain/network. -### Les subgraphs peuvent-ils réagir à des déclencheurs plus spécifiques ? +### Can Subgraphs react to more specific triggers? Actuellement, seuls les déclencheurs de blocage et de réception sont pris en charge. Nous étudions les déclencheurs pour les appels de fonction à un compte spécifique. 
Nous souhaitons également prendre en charge les déclencheurs d'événements, une fois que NEAR disposera d'un support natif pour les événements. @@ -258,25 +258,25 @@ If an `account` is specified, that will only match the exact account name. It is ```yaml comptes: - suffixes: - - mintbase1.near + suffixes : + - mintbase1.near ``` -### Les subgraphs NEAR peuvent-ils faire des appels de view aux comptes NEAR pendant les mappings? +### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? Cette fonction n'est pas prise en charge. Nous sommes en train d'évaluer si cette fonctionnalité est nécessaire pour l'indexation. -### Puis-je utiliser des modèles de sources de données dans mon subgraph NEAR ? +### Can I use data source templates in my NEAR Subgraph? Ceci n’est actuellement pas pris en charge. Nous évaluons si cette fonctionnalité est requise pour l'indexation. -### Les subgraphs Ethereum supportent les versions "pending" et "current", comment puis-je déployer une version "pending" d'un subgraph NEAR ? +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? -La fonctionnalité "pending" n'est pas encore prise en charge pour les subgraphs NEAR. Dans l'intervalle, vous pouvez déployer une nouvelle version dans un autre subgraph "named", puis, lorsque celui-ci est synchronisé avec la tête de chaîne, vous pouvez redéployer dans votre subgraph principal "named", qui utilisera le même ID de déploiement sous-jacent, de sorte que le subgraph principal sera instantanément synchronisé. +Pending functionality is not yet supported for NEAR Subgraphs. In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. -### Ma question n'a pas reçu de réponse, où puis-je obtenir plus d'aide concernant la création de subgraphs NEAR ? +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## Les Références From 63c8dc8c51f08e0010ecafc9f2a3ff0d467aacb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:30 -0500 Subject: [PATCH 0628/1789] New translations near.mdx (Spanish) --- .../src/pages/es/subgraphs/cookbook/near.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/es/subgraphs/cookbook/near.mdx b/website/src/pages/es/subgraphs/cookbook/near.mdx index 67db2b1278cb..eb19dfa2ac90 100644 --- a/website/src/pages/es/subgraphs/cookbook/near.mdx +++ b/website/src/pages/es/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: Construcción de subgrafos en NEAR --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). 
+This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## ¿Qué es NEAR? [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## ¿Qué son los subgrafos NEAR? +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR Subgraphs: - Handlers de bloques: se ejecutan en cada nuevo bloque - Handlers de recibos: se realizan cada vez que se ejecuta un mensaje en una cuenta específica @@ -23,32 +23,32 @@ Subgraphs are event-based, which means that they listen for and then process onc ## Construcción de un subgrafo NEAR -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> Construir un subgrafo NEAR es muy similar a construir un subgrafo que indexa Ethereum. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -Hay tres aspectos de la definición de subgrafo: +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. 
NEAR support introduces NEAR-specific data types and new JSON parsing functionality. -Durante el desarrollo del subgrafo hay dos comandos clave: +During Subgraph development there are two key commands: ```bash -$ graph codegen # genera tipos a partir del archivo de esquema identificado en el manifiesto -$ graph build # genera Web Assembly a partir de los archivos de AssemblyScript y prepara todos los archivos de subgrafo en una carpeta /build +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### Definición de manifiesto del subgrafo -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. @@ -92,7 +92,7 @@ Las fuentes de datos NEAR admiten dos tipos de handlers: ### Definición de esquema -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### Asignaciones de AssemblyScript @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. 
This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## Deployando un subgrafo NEAR -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". -Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -La configuración del nodo dependerá de dónde se implemente el subgrafo. +The node configuration will depend on where the Subgraph is being deployed. ### Subgraph Studio @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Una vez que se haya implementado su subgrafo, Graph Node lo indexará. Puede comprobar su progreso consultando el propio subgrafo: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ Pronto proporcionaremos más información sobre cómo ejecutar los componentes a ## Consultando un subgrafo NEAR -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. 
Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Subgrafos de ejemplo -Here are some example subgraphs for reference: +Here are some example Subgraphs for reference: [NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -242,13 +242,13 @@ Here are some example subgraphs for reference: ### ¿Cómo funciona la beta? -El soporte NEAR está en versión beta, lo que significa que puede haber cambios en la API a medida que continuamos trabajando para mejorar la integración. Envíe un correo electrónico a near@thegraph.com para que podamos ayudarlo a crear subgrafos NEAR y mantenerte actualizado sobre los últimos desarrollos! +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! -### ¿Puede un subgrafo indexar las cadenas NEAR y EVM? +### Can a Subgraph index both NEAR and EVM chains? -No, un subgrafo sólo puede admitir fuentes de datos de una cadena/red. +No, a Subgraph can only support data sources from one chain/network. -### ¿Pueden los subgrafos reaccionar a activadores más específicos? +### Can Subgraphs react to more specific triggers? Actualmente, solo se admiten los activadores de Bloque y Recibo. Estamos investigando activadores para llamadas a funciones a una cuenta específica. También estamos interesados en admitir activadores de eventos, una vez que NEAR tenga soporte nativo para eventos. @@ -262,21 +262,21 @@ accounts: - mintbase1.near ``` -### ¿Pueden los subgrafos NEAR realizar view calls a cuentas NEAR durante las asignaciones? +### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? Esto no es compatible. Estamos evaluando si esta funcionalidad es necesaria para la indexación. -### ¿Puedo usar plantillas de fuente de datos en mi subgrafo NEAR? +### Can I use data source templates in my NEAR Subgraph? Esto no es compatible actualmente. Estamos evaluando si esta funcionalidad es necesaria para la indexación. -### Los subgrafos de Ethereum admiten versiones "pendientes" y "actuales", ¿cómo puedo implementar una versión "pendiente" de un subgrafo NEAR? +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? -La funcionalidad pendiente aún no es compatible con los subgrafos NEAR. Mientras tanto, puedes implementar una nueva versión en un subgrafo "nombrado" diferente y luego, cuando se sincroniza con el encabezado de la cadena, puedes volver a implementarlo en su subgrafo principal "nombrado", que usará el mismo ID de implementación subyacente, por lo que el subgrafo principal se sincronizará instantáneamente. +Pending functionality is not yet supported for NEAR Subgraphs. In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. -### Mi pregunta no ha sido respondida, ¿dónde puedo obtener más ayuda para crear subgrafos NEAR? +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). 
Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## Referencias From 98f70d84079f22b030ca06c86c2a752d0b7db4d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:31 -0500 Subject: [PATCH 0629/1789] New translations near.mdx (Arabic) --- .../src/pages/ar/subgraphs/cookbook/near.mdx | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/ar/subgraphs/cookbook/near.mdx b/website/src/pages/ar/subgraphs/cookbook/near.mdx index bdbe8e518a6b..c251994610f9 100644 --- a/website/src/pages/ar/subgraphs/cookbook/near.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: بناء Subgraphs على NEAR --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## ما هو NEAR؟ [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## ماهي NEAR subgraphs؟ +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR Subgraphs: - معالجات الكتل(Block handlers): يتم تشغيلها على كل كتلة جديدة - معالجات الاستلام (Receipt handlers): يتم تشغيلها في كل مرة يتم فيها تنفيذ رسالة على حساب محدد @@ -23,32 +23,32 @@ Subgraphs are event-based, which means that they listen for and then process onc ## بناء NEAR Subgraph -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. 
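For example (assuming npm; the yarn equivalents work the same way), the tooling can be installed or upgraded and the CLI version checked like this:

```bash
# Install or upgrade the CLI globally
npm install -g @graphprotocol/graph-cli

# graph-ts is a dependency of the Subgraph project itself
npm install --save @graphprotocol/graph-ts

# Confirm the CLI meets the minimum version above
graph --version
```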
-> Building a NEAR subgraph is very similar to building a subgraph that indexes Ethereum. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -هناك ثلاثة جوانب لتعريف الـ subgraph: +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. -During subgraph development there are two key commands: +During Subgraph development there are two key commands: ```bash $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### تعريف Subgraph Manifest -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. 
@@ -92,7 +92,7 @@ accounts: ### تعريف المخطط -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript Mappings @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## نشر NEAR Subgraph -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". 
-Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -The node configuration will depend on where the subgraph is being deployed. +The node configuration will depend on where the Subgraph is being deployed. ### Subgraph Studio @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -بمجرد نشر الـ subgraph الخاص بك ، سيتم فهرسته بواسطة Graph Node. يمكنك التحقق من تقدمه عن طريق الاستعلام عن الـ subgraph نفسه: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 Date: Tue, 25 Feb 2025 17:19:32 -0500 Subject: [PATCH 0630/1789] New translations near.mdx (Czech) --- .../src/pages/cs/subgraphs/cookbook/near.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/near.mdx b/website/src/pages/cs/subgraphs/cookbook/near.mdx index dc65c11da629..6b6dd1ff6e78 100644 --- a/website/src/pages/cs/subgraphs/cookbook/near.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: Vytváření podgrafů v NEAR --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## Co je NEAR? [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## Co jsou podgrafy NEAR? +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. 
There are currently two types of handlers supported for NEAR Subgraphs: - Obsluhy bloků: jsou spouštěny při každém novém bloku. - Obsluhy příjmu: spouštějí se pokaždé, když je zpráva provedena na zadaném účtu. @@ -23,32 +23,32 @@ Subgraphs are event-based, which means that they listen for and then process onc ## Sestavení podgrafu NEAR -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> Vytváření subgrafu NEAR je velmi podobné vytváření subgrafu, který indexuje Ethereum. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -Definice podgrafů má tři aspekty: +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. -Při vývoji podgrafů existují dva klíčové příkazy: +During Subgraph development there are two key commands: ```bash -$ graph codegen # generuje typy ze souboru se schématem identifikovaným v manifestu -$ graph build # vygeneruje webové sestavení ze souborů AssemblyScript a připraví všechny dílčí soubory do složky /build +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### Definice podgrafu Manifest -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. 
See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. @@ -92,7 +92,7 @@ Zdroje dat NEAR podporují dva typy zpracovatelů: ### Definice schématu -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript Mapování @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## Nasazení podgrafu NEAR -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). 
-As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". -Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -Konfigurace uzlů závisí na tom, kde je podgraf nasazen. +The node configuration will depend on where the Subgraph is being deployed. ### Podgraf Studio @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Jakmile je podgraf nasazen, bude indexován pomocí Graph Node. Jeho průběh můžete zkontrolovat dotazem na samotný podgraf: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ Brzy vám poskytneme další informace o provozu výše uvedených komponent. ## Dotazování podgrafu NEAR -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Příklady podgrafů -Zde je několik příkladů podgrafů: +Here are some example Subgraphs for reference: [NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -242,13 +242,13 @@ Zde je několik příkladů podgrafů: ### Jak funguje beta verze? -Podpora NEAR je ve fázi beta, což znamená, že v API může dojít ke změnám, protože budeme pokračovat ve zdokonalování integrace. Napište nám prosím na adresu near@thegraph.com, abychom vás mohli podpořit při vytváření podgrafů NEAR a informovat vás o nejnovějším vývoji! +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! -### Může podgraf indexovat řetězce NEAR i EVM? +### Can a Subgraph index both NEAR and EVM chains? -Ne, podgraf může podporovat zdroje dat pouze z jednoho řetězce/sítě. +No, a Subgraph can only support data sources from one chain/network. -### Mohou podgrafy reagovat na specifičtější spouštěče? +### Can Subgraphs react to more specific triggers? 
V současné době jsou podporovány pouze spouštěče Blok a Příjem. Zkoumáme spouštěče pro volání funkcí na zadaném účtu. Máme také zájem o podporu spouštěčů událostí, jakmile bude mít NEAR nativní podporu událostí. @@ -262,21 +262,21 @@ accounts: - mintbase1.near ``` -### Mohou podgrafy NEAR během mapování volat zobrazení na účty NEAR? +### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? To není podporováno. Vyhodnocujeme, zda je tato funkce pro indexování nutná. -### Mohu v podgrafu NEAR používat šablony zdrojů dat? +### Can I use data source templates in my NEAR Subgraph? Tato funkce není v současné době podporována. Vyhodnocujeme, zda je tato funkce pro indexování nutná. -### Podgrafy Ethereum podporují verze "pending" a "current", jak mohu nasadit verzi "pending" podgrafu NEAR? +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? -Pro podgrafy NEAR zatím nejsou podporovány čekající funkce. V mezidobí můžete novou verzi nasadit do jiného "pojmenovaného" podgrafu a po jeho synchronizaci s hlavou řetězce ji můžete znovu nasadit do svého hlavního "pojmenovaného" podgrafu, který bude používat stejné ID nasazení, takže hlavní podgraf bude okamžitě synchronizován. +Pending functionality is not yet supported for NEAR Subgraphs. In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. -### Moje otázka nebyla zodpovězena, kde mohu získat další pomoc při vytváření podgrafů NEAR? +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## Odkazy: From 29c8dbf11cac4450a846371d09fd452977747bf7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:33 -0500 Subject: [PATCH 0631/1789] New translations near.mdx (German) --- .../src/pages/de/subgraphs/cookbook/near.mdx | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/de/subgraphs/cookbook/near.mdx b/website/src/pages/de/subgraphs/cookbook/near.mdx index d748e4787563..0f9702e647ad 100644 --- a/website/src/pages/de/subgraphs/cookbook/near.mdx +++ b/website/src/pages/de/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: Building Subgraphs on NEAR --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## What is NEAR? [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. 
-## What are NEAR subgraphs? +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR Subgraphs: - Block handlers: these are run on every new block - Receipt handlers: run every time a message is executed at a specified account @@ -23,32 +23,32 @@ Subgraphs are event-based, which means that they listen for and then process onc ## Building a NEAR Subgraph -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> Building a NEAR subgraph is very similar to building a subgraph that indexes Ethereum. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -There are three aspects of subgraph definition: +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. 
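As a hedged illustration of the mapping layer described above, the sketch below shows what a minimal receipt handler could look like. The `Activity` entity and the `receipt.id` / `signerId` / `receiverId` field names are assumptions drawn from the NEAR examples linked later in these docs, not something this patch defines.

```typescript
// Hypothetical mapping sketch: not part of the patch above.
// `Activity` is an invented entity, and the field names on
// `near.ReceiptWithOutcome` are assumptions.
import { near } from '@graphprotocol/graph-ts'
import { Activity } from '../generated/schema'

export function handleReceipt(receiptWithOutcome: near.ReceiptWithOutcome): void {
  const receipt = receiptWithOutcome.receipt

  // One entity per receipt, keyed by the receipt ID.
  let activity = new Activity(receipt.id.toHexString())
  activity.signer = receipt.signerId
  activity.receiver = receipt.receiverId
  activity.save()
}
```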
-During subgraph development there are two key commands: +During Subgraph development there are two key commands: ```bash $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### Subgraf-Manifest-Definition -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. @@ -92,7 +92,7 @@ NEAR data sources support two types of handlers: ### Schema-Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript-Mappings @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. 
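Since the patched text only names `json.fromString(...)`, here is a small hedged sketch of how a receipt handler might parse stringified JSON logs. It assumes the execution outcome exposes its logs as a string array (`outcome.logs`), and the `greeting` key is invented purely for illustration.

```typescript
// Hypothetical helper: not part of the patch above.
// Assumes `receiptWithOutcome.outcome.logs` is an Array<string>; the
// "greeting" key is invented for illustration only.
import { near, json, JSONValueKind, log } from '@graphprotocol/graph-ts'

export function handleReceipt(receiptWithOutcome: near.ReceiptWithOutcome): void {
  const logs = receiptWithOutcome.outcome.logs

  for (let i = 0; i < logs.length; i++) {
    // Many NEAR contracts emit stringified JSON; non-JSON logs are skipped
    // here because json.fromString expects valid JSON input.
    if (!logs[i].startsWith('{')) continue

    const parsed = json.fromString(logs[i])
    if (parsed.kind != JSONValueKind.OBJECT) continue

    const obj = parsed.toObject()
    const greeting = obj.get('greeting') // invented key
    if (greeting != null && greeting.kind == JSONValueKind.STRING) {
      log.info('greeting logged: {}', [greeting.toString()])
    }
  }
}
```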
## Deploying a NEAR Subgraph -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". -Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -The node configuration will depend on where the subgraph is being deployed. +The node configuration will depend on where the Subgraph is being deployed. ### Subgraph Studio @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Once your subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the subgraph itself: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ We will provide more information on running the above components soon. ## Querying a NEAR Subgraph -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. 
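To make the querying step concrete, here is a hedged TypeScript sketch of a client calling a deployed Subgraph endpoint over HTTP. The endpoint URL and the `greetings` entity are placeholders; the `_meta` field is the usual graph-node way to check indexing progress, but confirm it against the GraphQL API docs linked above. It assumes a runtime with a global `fetch` (Node 18+ or a browser).

```typescript
// Hypothetical client-side query: not part of the patch above.
// Substitute the query URL shown for your Subgraph in Subgraph Studio and
// the entities from your own schema.
const ENDPOINT = 'https://api.studio.thegraph.com/query/<id>/<name>/<version>' // placeholder

const query = `
  {
    _meta { block { number } hasIndexingErrors }
    greetings(first: 5) { id }
  }
`

async function main(): Promise<void> {
  const res = await fetch(ENDPOINT, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query }),
  })
  const body = await res.json()
  if (body.errors) throw new Error(JSON.stringify(body.errors))
  console.log('indexed up to block', body.data._meta.block.number)
  console.log(body.data.greetings)
}

main().catch(console.error)
```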
## Beispiele von Subgrafen -Here are some example subgraphs for reference: +Here are some example Subgraphs for reference: [NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -242,13 +242,13 @@ Here are some example subgraphs for reference: ### How does the beta work? -NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR subgraphs, and keep you up to date on the latest developments! +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! -### Can a subgraph index both NEAR and EVM chains? +### Can a Subgraph index both NEAR and EVM chains? -No, a subgraph can only support data sources from one chain/network. +No, a Subgraph can only support data sources from one chain/network. -### Can subgraphs react to more specific triggers? +### Can Subgraphs react to more specific triggers? Currently, only Block and Receipt triggers are supported. We are investigating triggers for function calls to a specified account. We are also interested in supporting event triggers, once NEAR has native event support. @@ -262,21 +262,21 @@ accounts: - mintbase1.near ``` -### Can NEAR subgraphs make view calls to NEAR accounts during mappings? +### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? This is not supported. We are evaluating whether this functionality is required for indexing. -### Can I use data source templates in my NEAR subgraph? +### Can I use data source templates in my NEAR Subgraph? This is not currently supported. We are evaluating whether this functionality is required for indexing. -### Ethereum subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR subgraph? +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? -Pending functionality is not yet supported for NEAR subgraphs. In the interim, you can deploy a new version to a different "named" subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" subgraph, which will use the same underlying deployment ID, so the main subgraph will be instantly synced. +Pending functionality is not yet supported for NEAR Subgraphs. In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. -### My question hasn't been answered, where can I get more help building NEAR subgraphs? +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). 
Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## References From b20566171aa029d8830b304f936b3f1603bc824a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:34 -0500 Subject: [PATCH 0632/1789] New translations near.mdx (Italian) --- .../src/pages/it/subgraphs/cookbook/near.mdx | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/it/subgraphs/cookbook/near.mdx b/website/src/pages/it/subgraphs/cookbook/near.mdx index 809574aa81cd..020da4e9296c 100644 --- a/website/src/pages/it/subgraphs/cookbook/near.mdx +++ b/website/src/pages/it/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: Building Subgraphs on NEAR --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## What is NEAR? [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## What are NEAR subgraphs? +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR Subgraphs: - Block handlers: these are run on every new block - Receipt handlers: run every time a message is executed at a specified account @@ -23,32 +23,32 @@ Subgraphs are event-based, which means that they listen for and then process onc ## Building a NEAR Subgraph -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> Building a NEAR subgraph is very similar to building a subgraph that indexes Ethereum. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -There are three aspects of subgraph definition: +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. 
NEAR is a new `kind` of data source. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. -During subgraph development there are two key commands: +During Subgraph development there are two key commands: ```bash $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### Subgraph Manifest Definition -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. @@ -92,7 +92,7 @@ NEAR data sources support two types of handlers: ### Schema Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. 
There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript Mappings @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## Deploying a NEAR Subgraph -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". -Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -The node configuration will depend on where the subgraph is being deployed. +The node configuration will depend on where the Subgraph is being deployed. ### Subgraph Studio @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Once your subgraph has been deployed, it will be indexed by Graph Node. 
You can check its progress by querying the subgraph itself: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ We will provide more information on running the above components soon. ## Querying a NEAR Subgraph -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Example Subgraphs -Here are some example subgraphs for reference: +Here are some example Subgraphs for reference: [NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -242,13 +242,13 @@ Here are some example subgraphs for reference: ### How does the beta work? -NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR subgraphs, and keep you up to date on the latest developments! +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! -### Can a subgraph index both NEAR and EVM chains? +### Can a Subgraph index both NEAR and EVM chains? -No, a subgraph can only support data sources from one chain/network. +No, a Subgraph can only support data sources from one chain/network. -### Can subgraphs react to more specific triggers? +### Can Subgraphs react to more specific triggers? Currently, only Block and Receipt triggers are supported. We are investigating triggers for function calls to a specified account. We are also interested in supporting event triggers, once NEAR has native event support. @@ -262,21 +262,21 @@ accounts: - mintbase1.near ``` -### Can NEAR subgraphs make view calls to NEAR accounts during mappings? +### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? This is not supported. We are evaluating whether this functionality is required for indexing. -### Can I use data source templates in my NEAR subgraph? +### Can I use data source templates in my NEAR Subgraph? This is not currently supported. We are evaluating whether this functionality is required for indexing. -### Ethereum subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR subgraph? +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? -Pending functionality is not yet supported for NEAR subgraphs. In the interim, you can deploy a new version to a different "named" subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" subgraph, which will use the same underlying deployment ID, so the main subgraph will be instantly synced. +Pending functionality is not yet supported for NEAR Subgraphs. 
In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. -### My question hasn't been answered, where can I get more help building NEAR subgraphs? +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## Riferimenti From 9344d154a64e131a17f2a2ead4dca921885cd3be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:35 -0500 Subject: [PATCH 0633/1789] New translations near.mdx (Japanese) --- .../src/pages/ja/subgraphs/cookbook/near.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/ja/subgraphs/cookbook/near.mdx b/website/src/pages/ja/subgraphs/cookbook/near.mdx index 6f4069566be2..fb416fe03402 100644 --- a/website/src/pages/ja/subgraphs/cookbook/near.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: NEAR でサブグラフを作成する --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## NEAR とは? [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## NEAR サブグラフとは? +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR Subgraphs: - ブロックハンドラ:新しいブロックごとに実行されます - レシートハンドラ:指定されたアカウントでメッセージが実行されるたびに実行されます @@ -23,32 +23,32 @@ Subgraphs are event-based, which means that they listen for and then process onc ## NEAR サブグラフの構築 -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. 
+`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> NEAR サブグラフの構築は、Ethereum のインデックスを作成するサブグラフの構築と非常によく似ています。 +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -サブグラフの定義には 3 つの側面があります: +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. -サブグラフの開発には 2 つの重要なコマンドがあります: +During Subgraph development there are two key commands: ```bash -$ graph codegen # マニフェストで識別されたようにファイルから型を生成します -$ グラフ ビルド # AssemblyScript ファイルから Web アセンブリを生成し、/build フォルダにすべてのサブグラフ ファイルを準備します +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### サブグラフマニフェストの定義 -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. 
At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. @@ -92,7 +92,7 @@ NEAR データソースは 2 種類のハンドラーをサポートしていま ### スキーマ定義 -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript マッピング @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## NEAR サブグラフの展開 -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". 
-Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -ノードの構成は、サブグラフがどこにディプロイされるかによって異なります。 +The node configuration will depend on where the Subgraph is being deployed. ### Subgraph Studio @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -デプロイされたサブグラフは、Graph Node によってインデックス化され、その進捗状況は、サブグラフ自体にクエリして確認できます: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ NEAR のインデックスを作成するグラフノードの運用には、以 ## NEAR サブグラフへのクエリ -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## サブグラフの例 -Here are some example subgraphs for reference: +Here are some example Subgraphs for reference: [NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -242,13 +242,13 @@ Here are some example subgraphs for reference: ### ベータ版はどのように機能しますか? -NEAR サポートはベータ版です。統合の改善を続ける中で、API に変更が加えられる可能性があります。NEAR サブグラフの構築をサポートし、最新の開発状況をお知らせしますので、near@thegraph.comまでメールをお送りください。 +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! -### サブグラフは NEAR チェーンと EVM チェーンの両方にインデックスを付けることができますか? +### Can a Subgraph index both NEAR and EVM chains? -いいえ、サブグラフは 1 つのチェーン/ネットワークのデータソースのみをサポートします。 +No, a Subgraph can only support data sources from one chain/network. -### サブグラフはより具体的なトリガーに反応できますか? +### Can Subgraphs react to more specific triggers? 現在、ブロックとレシートのトリガーのみがサポートされています。指定されたアカウントへのファンクションコールのトリガーを検討しています。また、NEAR がネイティブイベントをサポートするようになれば、イベントトリガーのサポートも検討しています。 @@ -262,21 +262,21 @@ accounts: - mintbase1.near ``` -### NEAR サブグラフは、マッピング中に NEAR アカウントへのビュー呼び出しを行うことができますか? +### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? これはサポートされていません。この機能がインデックス作成に必要かどうかを評価しています。 -### NEAR サブグラフでデータ ソース テンプレートを使用できますか? +### Can I use data source templates in my NEAR Subgraph? これは現在サポートされていません。この機能がインデックス作成に必要かどうかを評価しています。 -### Ethereum サブグラフは「保留中」バージョンと「現在」バージョンをサポートしていますが、NEAR サブグラフの「保留中」バージョンをデプロイするにはどうすればよいですか? +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? 
-「pending」は、NEAR サブグラフではまだサポートされていません。暫定的に、新しいバージョンを別の「named」サブグラフにデプロイし、それがチェーンヘッドと同期したときに、メインの「named」サブグラフに再デプロイすることができます。 +Pending functionality is not yet supported for NEAR Subgraphs. In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. -### 私の質問に対する回答がありません。NEAR サブグラフの作成に関するヘルプはどこで入手できますか? +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## 参考文献 From ee208fbbeaafda519dccca41a06fb4fb7e1f0f94 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:36 -0500 Subject: [PATCH 0634/1789] New translations near.mdx (Korean) --- .../src/pages/ko/subgraphs/cookbook/near.mdx | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/ko/subgraphs/cookbook/near.mdx b/website/src/pages/ko/subgraphs/cookbook/near.mdx index 6060eb27e761..698a0ac3486c 100644 --- a/website/src/pages/ko/subgraphs/cookbook/near.mdx +++ b/website/src/pages/ko/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: Building Subgraphs on NEAR --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## What is NEAR? [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## What are NEAR subgraphs? +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. 
There are currently two types of handlers supported for NEAR Subgraphs: - Block handlers: these are run on every new block - Receipt handlers: run every time a message is executed at a specified account @@ -23,32 +23,32 @@ Subgraphs are event-based, which means that they listen for and then process onc ## Building a NEAR Subgraph -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> Building a NEAR subgraph is very similar to building a subgraph that indexes Ethereum. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -There are three aspects of subgraph definition: +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. -During subgraph development there are two key commands: +During Subgraph development there are two key commands: ```bash $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### Subgraph Manifest Definition -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. 
On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. @@ -92,7 +92,7 @@ NEAR data sources support two types of handlers: ### Schema Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript Mappings @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## Deploying a NEAR Subgraph -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". 
-Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -The node configuration will depend on where the subgraph is being deployed. +The node configuration will depend on where the Subgraph is being deployed. ### Subgraph Studio @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Once your subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the subgraph itself: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ We will provide more information on running the above components soon. ## Querying a NEAR Subgraph -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Example Subgraphs -Here are some example subgraphs for reference: +Here are some example Subgraphs for reference: [NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -242,13 +242,13 @@ Here are some example subgraphs for reference: ### How does the beta work? -NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR subgraphs, and keep you up to date on the latest developments! +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! -### Can a subgraph index both NEAR and EVM chains? +### Can a Subgraph index both NEAR and EVM chains? -No, a subgraph can only support data sources from one chain/network. +No, a Subgraph can only support data sources from one chain/network. -### Can subgraphs react to more specific triggers? +### Can Subgraphs react to more specific triggers? Currently, only Block and Receipt triggers are supported. We are investigating triggers for function calls to a specified account. We are also interested in supporting event triggers, once NEAR has native event support. @@ -262,21 +262,21 @@ accounts: - mintbase1.near ``` -### Can NEAR subgraphs make view calls to NEAR accounts during mappings? 
+### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? This is not supported. We are evaluating whether this functionality is required for indexing. -### Can I use data source templates in my NEAR subgraph? +### Can I use data source templates in my NEAR Subgraph? This is not currently supported. We are evaluating whether this functionality is required for indexing. -### Ethereum subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR subgraph? +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? -Pending functionality is not yet supported for NEAR subgraphs. In the interim, you can deploy a new version to a different "named" subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" subgraph, which will use the same underlying deployment ID, so the main subgraph will be instantly synced. +Pending functionality is not yet supported for NEAR Subgraphs. In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. -### My question hasn't been answered, where can I get more help building NEAR subgraphs? +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## References From 536da88480f12898148241a397c9205cf01104ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:37 -0500 Subject: [PATCH 0635/1789] New translations near.mdx (Dutch) --- .../src/pages/nl/subgraphs/cookbook/near.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/nl/subgraphs/cookbook/near.mdx b/website/src/pages/nl/subgraphs/cookbook/near.mdx index 75f966e7a597..698a0ac3486c 100644 --- a/website/src/pages/nl/subgraphs/cookbook/near.mdx +++ b/website/src/pages/nl/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: Building Subgraphs on NEAR --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## What is NEAR? [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## What are NEAR subgraphs? +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. 
[Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR Subgraphs: - Block handlers: these are run on every new block - Receipt handlers: run every time a message is executed at a specified account @@ -23,32 +23,32 @@ Subgraphs are event-based, which means that they listen for and then process onc ## Building a NEAR Subgraph -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> Building a NEAR subgraph is very similar to building a subgraph that indexes Ethereum. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -There are three aspects of subgraph definition: +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. 
-Tijdens subgraph ontwikkeling zijn er twee belangrijke commando's: +During Subgraph development there are two key commands: ```bash -$ graph codegen # genereert types van het schema bestand die geïdentificeerd is in het manifest -$ graph build # genereert Web Assembly vanuit de AssemblyScript-bestanden, en bereidt alle Subgraph-bestanden voor in een /build map +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### Subgraph Manifest Definition -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. @@ -92,7 +92,7 @@ NEAR data sources support two types of handlers: ### Schema Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript Mappings @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. 
A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## Deploying a NEAR Subgraph -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". -Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -The node configuration will depend on where the subgraph is being deployed. +The node configuration will depend on where the Subgraph is being deployed. ### Subgraph Studio @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Once your subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the subgraph itself: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ We will provide more information on running the above components soon. ## Querying a NEAR Subgraph -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. 
## Example Subgraphs -Here are some example subgraphs for reference: +Here are some example Subgraphs for reference: [NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -242,13 +242,13 @@ Here are some example subgraphs for reference: ### How does the beta work? -NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR subgraphs, and keep you up to date on the latest developments! +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! -### Can a subgraph index both NEAR and EVM chains? +### Can a Subgraph index both NEAR and EVM chains? -No, a subgraph can only support data sources from one chain/network. +No, a Subgraph can only support data sources from one chain/network. -### Can subgraphs react to more specific triggers? +### Can Subgraphs react to more specific triggers? Currently, only Block and Receipt triggers are supported. We are investigating triggers for function calls to a specified account. We are also interested in supporting event triggers, once NEAR has native event support. @@ -262,21 +262,21 @@ accounts: - mintbase1.near ``` -### Can NEAR subgraphs make view calls to NEAR accounts during mappings? +### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? This is not supported. We are evaluating whether this functionality is required for indexing. -### Can I use data source templates in my NEAR subgraph? +### Can I use data source templates in my NEAR Subgraph? This is not currently supported. We are evaluating whether this functionality is required for indexing. -### Ethereum subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR subgraph? +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? -Pending functionality is not yet supported for NEAR subgraphs. In the interim, you can deploy a new version to a different "named" subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" subgraph, which will use the same underlying deployment ID, so the main subgraph will be instantly synced. +Pending functionality is not yet supported for NEAR Subgraphs. In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. -### My question hasn't been answered, where can I get more help building NEAR subgraphs? +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). 
Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## References From e5754ace134f036edde8edf96705d033df4c2706 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:38 -0500 Subject: [PATCH 0636/1789] New translations near.mdx (Polish) --- .../src/pages/pl/subgraphs/cookbook/near.mdx | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/pl/subgraphs/cookbook/near.mdx b/website/src/pages/pl/subgraphs/cookbook/near.mdx index 6060eb27e761..698a0ac3486c 100644 --- a/website/src/pages/pl/subgraphs/cookbook/near.mdx +++ b/website/src/pages/pl/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: Building Subgraphs on NEAR --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## What is NEAR? [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## What are NEAR subgraphs? +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR Subgraphs: - Block handlers: these are run on every new block - Receipt handlers: run every time a message is executed at a specified account @@ -23,32 +23,32 @@ Subgraphs are event-based, which means that they listen for and then process onc ## Building a NEAR Subgraph -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> Building a NEAR subgraph is very similar to building a subgraph that indexes Ethereum. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -There are three aspects of subgraph definition: +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. 
NEAR is a new `kind` of data source. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. -During subgraph development there are two key commands: +During Subgraph development there are two key commands: ```bash $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### Subgraph Manifest Definition -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. @@ -92,7 +92,7 @@ NEAR data sources support two types of handlers: ### Schema Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. 
There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript Mappings @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## Deploying a NEAR Subgraph -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". -Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -The node configuration will depend on where the subgraph is being deployed. +The node configuration will depend on where the Subgraph is being deployed. ### Subgraph Studio @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Once your subgraph has been deployed, it will be indexed by Graph Node. 
You can check its progress by querying the subgraph itself: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ We will provide more information on running the above components soon. ## Querying a NEAR Subgraph -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Example Subgraphs -Here are some example subgraphs for reference: +Here are some example Subgraphs for reference: [NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -242,13 +242,13 @@ Here are some example subgraphs for reference: ### How does the beta work? -NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR subgraphs, and keep you up to date on the latest developments! +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! -### Can a subgraph index both NEAR and EVM chains? +### Can a Subgraph index both NEAR and EVM chains? -No, a subgraph can only support data sources from one chain/network. +No, a Subgraph can only support data sources from one chain/network. -### Can subgraphs react to more specific triggers? +### Can Subgraphs react to more specific triggers? Currently, only Block and Receipt triggers are supported. We are investigating triggers for function calls to a specified account. We are also interested in supporting event triggers, once NEAR has native event support. @@ -262,21 +262,21 @@ accounts: - mintbase1.near ``` -### Can NEAR subgraphs make view calls to NEAR accounts during mappings? +### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? This is not supported. We are evaluating whether this functionality is required for indexing. -### Can I use data source templates in my NEAR subgraph? +### Can I use data source templates in my NEAR Subgraph? This is not currently supported. We are evaluating whether this functionality is required for indexing. -### Ethereum subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR subgraph? +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? -Pending functionality is not yet supported for NEAR subgraphs. In the interim, you can deploy a new version to a different "named" subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" subgraph, which will use the same underlying deployment ID, so the main subgraph will be instantly synced. +Pending functionality is not yet supported for NEAR Subgraphs. 
In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. -### My question hasn't been answered, where can I get more help building NEAR subgraphs? +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## References From 555db483b13c57aaee2cc488df1d1df933d4b080 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:39 -0500 Subject: [PATCH 0637/1789] New translations near.mdx (Portuguese) --- .../src/pages/pt/subgraphs/cookbook/near.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/near.mdx b/website/src/pages/pt/subgraphs/cookbook/near.mdx index 58143e87a809..7fa170add596 100644 --- a/website/src/pages/pt/subgraphs/cookbook/near.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: Construção de Subgraphs na NEAR --- -Este guia é uma introdução à construção de subgraphs para indexar contratos inteligentes na blockchain [NEAR](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## O que é NEAR? [NEAR](https://near.org/) é uma plataforma de contrato inteligente para construir aplicativos descentralizados. Visite a [documentação oficial](https://docs.near.org/concepts/basics/protocol) para mais informações. -## O que são subgraphs na NEAR? +## What are NEAR Subgraphs? -The Graph fornece aos programadores ferramentas para processar eventos de blockchain e tornar os dados resultantes facilmente disponíveis por meio de uma API GraphQL, conhecida individualmente como subgraph. O [Graph Node](https://github.com/graphprotocol/graph-node) agora é capaz de processar eventos NEAR, o que significa que programadores da NEAR agora podem criar subgraphs para indexar seus contratos inteligentes. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs são baseados em eventos; ou seja, eles esperam e então processam eventos on-chain. Atualmente há dois tipos de handlers que funcionam para subgraphs na NEAR: +Subgraphs are event-based, which means that they listen for and then process onchain events. 
There are currently two types of handlers supported for NEAR Subgraphs: - Handlers de blocos: executados em todos os blocos novos - Handlers de recibos: Executados sempre que uma mensagem é executada numa conta especificada @@ -23,32 +23,32 @@ Subgraphs são baseados em eventos; ou seja, eles esperam e então processam eve ## Construindo um Subgraph no NEAR -`@graphprotocol/graph-cli` é uma ferramenta de linha de comando para a construção e implantação de subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` é uma biblioteca de tipos específicos a subgraphs. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -A programação de subgraphs no NEAR exige o `graph-cli` acima da versão `0.23.0`, e o `graph-ts` acima da versão `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> Construir um subgraph NEAR é um processo muito parecido com a construção de um subgraph que indexa o Ethereum. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -Há três aspectos de definição de subgraph: +There are three aspects of Subgraph definition: -**subgraph.yaml:** o manifest do subgraph, que define as fontes de dados de interesse e como elas devem ser processadas. A NEAR é uma nova espécie (`kind`) de fonte de dados. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** um arquivo de schema que define quais dados são armazenados para o seu subgraph e como consultá-los via GraphQL. Os requisitos para subgraphs NEAR são cobertos pela [documentação existente](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **Mapeamentos de AssemblyScript:** [Código AssemblyScript](/subgraphs/developing/creating/graph-ts/api/) que traduz dos dados do evento para as entidades definidas no seu esquema. O apoio à NEAR introduz tipos de dados específicos da NEAR e novas funções de análise JSON. -Durante o desenvolvimento de um subgraph, existem dois comandos importantes: +During Subgraph development there are two key commands: ```bash -$ graph codegen # gera tipos do arquivo de schema identificado no manifest -$ graph build # gera Web Assembly dos arquivos AssemblyScript, e prepara todos os arquivos do subgraph em uma pasta /build +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### Definição de Manifest de Subgraph -O manifest do subgraph (`subgraph.yaml`) identifica as fontes de dados para o subgraph, os gatilhos de interesse, e as funções que devem ser executadas em resposta a tais gatilhos. Veja abaixo um exemplo de manifest para um subgraph na NEAR: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link ao arq. 
com os mapeamentos de Assemblyscript ``` -- Subgraphs na NEAR introduzem um novo tipo (`kind`) de fonte de dados (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - O `network` deve corresponder a uma rede no Graph Node hóspede. No Subgraph Studio, a mainnet da NEAR é `near-mainnet`, e a testnet da NEAR é `near-testnet` - Fontes de dados na NEAR introduzem um campo `source.account` opcional: uma ID legível a humanos que corresponde a uma [conta na NEAR](https://docs.near.org/concepts/protocol/account-model). Isto pode ser uma conta ou subconta. - As fontes de dados da NEAR introduzem um campo alternativo `source.accounts` opcional, que contém sufixos e prefixos opcionais. Pelo menos prefix ou sufixo deve ser especificado, eles corresponderão a qualquer conta que comece ou termine com a lista de valores, respectivamente. O exemplo abaixo corresponderia a: `[app|good].*[morning.near|morning.testnet]`. Se apenas uma lista de prefixos ou sufixos for necessária, o outro campo pode ser omitido. @@ -92,7 +92,7 @@ As fontes de dados na NEAR apoiam duas categorias de handlers: ### Definição de Schema -A definição de Schema descreve a estrutura do banco de dados resultado do subgraph, e os relacionamentos entre entidades. Isto é agnóstico da fonte de dados original. Para mais detalhes na definição de schema de subgraph, [clique aqui](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### Mapeamentos em AssemblyScript @@ -165,31 +165,31 @@ Estes tipos são repassados para handlers de blocos e recibos: - Handlers de blocos receberão um `Block` - Handlers de recibos receberão um `ReceiptWithOutcome` -Caso contrário, o resto da [API do AssemblyScript](/subgraphs/developing/creating/graph-ts/api/) está à disposição dos programadores de subgraph na NEAR, durante a execução dos mapeamentos. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. Isto inclui uma nova função de análise em JSON: logs na NEAR são frequentemente emitidos como JSONs em string. A nova função json.fromString(...) está disponível como parte da [API JSON](/subgraphs/developing/creating/graph-ts/api/#json-api) para que programadores processem estes logs com mais facilidade. ## Lançando um Subgraph na NEAR -Quando tiver um subgraph pronto, chegará a hora de implantá-lo no Graph Node para indexar. Subgraphs na NEAR podem ser implantados em qualquer Graph Node `>=v0.26.x` (esta versão ainda não foi marcada ou liberada). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). O Subgraph Studio e o Indexador de atualização na Graph Network apoiam atualmente a indexação da mainnet e da testnet do NEAR em beta, com os seguintes nomes de rede: - `near-mainnet` - `near-testnet` -Para mais informações sobre criar e implantar subgraphs no Subgraph Studio, clique [aqui](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). 
-Para começo de conversa, o primeiro passo consiste em "criar" o seu subgraph - isto só precisa ser feito uma vez. No Subgraph Studio, isto pode ser feito do [seu Painel](https://thegraph.com/studio/): "Criar um subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". -Quando o seu subgraph estiver pronto, implante o seu subgraph com o comando de CLI `graph deploy`: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # cria um subgraph num Graph Node local (no Subgraph Studio, isto é feito via a interface) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # sobe os arquivos do build a um ponto final IPFS especificado, e implanta o subgraph num Graph Node com base no hash IPFS do manifest +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -A configuração do nódulo dependerá de onde o subgraph será lançado. +The node configuration will depend on where the Subgraph is being deployed. ### Subgraph Studio @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Quando o seu subgraph for lançado, ele será indexado pelo Graph Node. O seu progresso pode ser conferido com um query no próprio subgraph: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ Em breve, falaremos mais sobre como executar os componentes acima. ## Como Consultar um Subgraph na NEAR -O ponto final do GraphQL para subgraphs na NEAR é determinado pela definição do schema, com a interface existente da API. Visite a [documentação da API da GraphQL](/subgraphs/querying/graphql-api/) para mais informações. +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Exemplos de Subgraphs -Aqui estão alguns exemplos de subgraphs para referência: +Here are some example Subgraphs for reference: [Blocos da NEAR](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -242,13 +242,13 @@ Aqui estão alguns exemplos de subgraphs para referência: ### Como o beta funciona? -O apoio à NEAR está em beta; podem ocorrer mais mudanças na API enquanto continuamos a melhorar a integração. Por favor, contacte-nos em near@thegraph.com para podermos apoiar-te na construção de subgraphs no NEAR e avisar-te sobre os acontecimentos mais recentes! +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! -### Um subgraph pode indexar chains da NEAR e da EVM? +### Can a Subgraph index both NEAR and EVM chains? -Não, um subgraph só pode apoiar fontes de dados de apenas uma chain/rede. +No, a Subgraph can only support data sources from one chain/network. 
-### Os subgraphs podem reagir a gatilhos mais específicos? +### Can Subgraphs react to more specific triggers? Atualmente, só há apoio a gatilhos de Blocos e Recibos. Estamos a investigar gatilhos para chamadas de função a uma conta específica. Também temos interesse em apoiar gatilhos de eventos, quando a NEAR receber apoio nativo a eventos. @@ -262,21 +262,21 @@ accounts: - mintbase1.near ``` -### Subgraphs na NEAR podem fazer chamadas de vistoria para contas NEAR durante os mapeamentos? +### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? Não há apoio a isto. Estamos a avaliar se esta funcionalidade é necessária para indexação. -### Posso usar modelos de fontes de dados no meu subgraph na NEAR? +### Can I use data source templates in my NEAR Subgraph? Não há apoio a isto no momento. Estamos a avaliar se esta funcionalidade é necessária para indexação. -### Subgraphs no Ethereum apoiam versões "pendentes" e "atuais". Como posso lançar uma versão "pendente" de um subgraph no NEAR? +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? -No momento, não há apoio à funcionalidade de pendências para subgraphs na NEAR. Entretanto, podes lançar uma nova versão para um subgraph de "nome" diferente, e quando este for sincronizado com a cabeça da chain, podes relançá-la para seu subgraph de "nome" primário, que usará o mesmo ID de lançamento subjacente — e aí, o subgraph principal sincronizará instantaneamente. +Pending functionality is not yet supported for NEAR Subgraphs. In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. -### A minha pergunta não foi respondida. Onde posso conseguir mais ajuda sobre construir subgraphs na NEAR? +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? -Se esta for uma pergunta geral sobre programação de subgraphs, há mais informações no resto da [documentação para programadores](/subgraphs/quick-start/). Caso contrário, entre no [Discord do Graph Protocol](https://discord.gg/graphprotocol) e pergunte no canal #near, ou mande a sua pergunta para near@thegraph.com. +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## Referências From 9487fc3c7bd5af3281ee38fe7266796efbbeadd5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:40 -0500 Subject: [PATCH 0638/1789] New translations near.mdx (Russian) --- .../src/pages/ru/subgraphs/cookbook/near.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/near.mdx b/website/src/pages/ru/subgraphs/cookbook/near.mdx index ac22a9f8c015..5686dc86a2d4 100644 --- a/website/src/pages/ru/subgraphs/cookbook/near.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: Создание субграфов на NEAR --- -Это руководство является введением в создание субграфов для индексирования смарт-контрактов на [блокчейне NEAR](https://docs.near.org/). 
+This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## Что такое NEAR? [NEAR](https://near.org/) — это платформа для смарт-контрактов, предназначенная для создания децентрализованных приложений. Для получения дополнительной информации ознакомьтесь с [официальной документацией](https://docs.near.org/concepts/basics/protocol). -## Что такое NEAR субграфы? +## What are NEAR Subgraphs? -The Graph предоставляет разработчикам инструменты для обработки событий блокчейна и упрощает доступ к полученным данным через API GraphQL, известный также как субграф. [Graph Node](https://github.com/graphprotocol/graph-node) теперь способен обрабатывать события NEAR, что позволяет разработчикам NEAR создавать субграфы для индексирования своих смарт-контрактов. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Субграфы основаны на событиях, что означает, что они отслеживают и обрабатывают события в блокчейне. В настоящее время для субграфов NEAR поддерживаются два типа обработчиков: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR Subgraphs: - Обработчики блоков: они запускаются для каждого нового блока - Обработчики поступлений: запускаются каждый раз, когда сообщение выполняется в указанной учетной записи @@ -23,32 +23,32 @@ The Graph предоставляет разработчикам инструме ## Создание NEAR субграфа -`@graphprotocol/graph-cli` — это инструмент командной строки для создания и развертывания субграфов. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` — это библиотека типов, специфичных для субграфов. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -Для разработки субграфов на платформе NEAR требуется `graph-cli` версии выше `0.23.0` и `graph-ts` версии выше `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> Построение NEAR субграфа очень похоже на построение субграфа, индексирующего Ethereum. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -Существует три аспекта определения субграфа: +There are three aspects of Subgraph definition: -**subgraph.yaml:** манифест субграфа, определяющий источники данных и способы их обработки. NEAR является новым `kind` (типом) источника данных. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** файл схемы, который определяет, какие данные хранятся в Вашем субграфе и как к ним можно обращаться через GraphQL. Требования для субграфов NEAR описаны в [существующей документации](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). 
**Мэппинги на AssemblyScript:** [код на AssemblyScript](/subgraphs/developing/creating/graph-ts/api/), который преобразует данные событий в элементы, определенные в Вашей схеме. Поддержка NEAR вводит специфичные для NEAR типы данных и новую функциональность для парсинга JSON. -Во время разработки субграфа есть две ключевые команды: +During Subgraph development there are two key commands: ```bash -$ graph codegen # генерирует типы из файла схемы, указанного в манифесте -$ graph build # генерирует Web Assembly из файлов AssemblyScript и подготавливает все файлы субграфа в папке /build +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### Определение манифеста субграфа -Манифест субграфа (`subgraph.yaml`) определяет источники данных для субграфа, интересующие триггеры и функции, которые должны быть выполнены в ответ на эти триггеры. Пример манифеста субграфа для NEAR представлен ниже: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- Субграфы NEAR вводят новый тип источника данных (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - `network` должен соответствовать сети на хостинговой Graph Node. В Subgraph Studio майннет NEAR называется `near-mainnet`, а теснет NEAR — `near-testnet` - Источники данных NEAR содержат необязательное поле `source.account`, которое представляет собой удобочитаемый идентификатор, соответствующий [учетной записи NEAR] (https://docs.near.org/concepts/protocol/account-model). Это может быть как основной аккаунт, так и суб-аккаунт. - Источники данных NEAR вводят альтернативное необязательное поле `source.accounts`, которое содержит необязательные префиксы и суффиксы. Необходимо указать хотя бы один префикс или суффикс, они будут соответствовать любому аккаунту, начинающемуся или заканчивающемуся на значения из списка соответственно. Приведенный ниже пример будет совпадать с: `[app|good].*[morning.near|morning.testnet]`. Если необходим только список префиксов или суффиксов, другое поле можно опустить. @@ -92,7 +92,7 @@ accounts: ### Определение схемы -Определение схемы описывает структуру итоговой базы данных субграфа и отношения между объектами. Это не зависит от исходного источника данных. Более подробную информацию об определении схемы субграфа можно найти [здесь](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### Мэппинги AssemblyScript @@ -165,31 +165,31 @@ class ReceiptWithOutcome { - Обработчики блоков получат `Block` - Обработчики поступлений получат `ReceiptWithOutcome` -В остальном, весь [API для AssemblyScript](/subgraphs/developing/creating/graph-ts/api/) доступен разработчикам субграфов для NEAR во время выполнения мэппинга. 
+Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. Это включает в себя новую функцию для парсинга JSON — логи в NEAR часто выводятся как строковые JSON. Новая функция `json.fromString(...)` доступна в рамках [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api), что позволяет разработчикам легко обрабатывать эти логи. ## Развертывание NEAR субграфа -Как только Ваш субграф будет создан, наступает время развернуть его на Graph Node для индексирования. Субграфы NEAR можно развернуть на любом Graph Node версии `>=v0.26.x` (эта версия еще не отмечена и не выпущена). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio и Индексатор обновлений в The Graph Network в настоящее время поддерживают индексирование основной и тестовой сети NEAR в бета-версии со следующими именами сетей: - `near-mainnet` - `near-testnet` -Дополнительную информацию о создании и развертывании субграфов в Subgraph Studio можно найти [здесь](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -В качестве краткого примера — первый шаг заключается в "создании" Вашего субграфа — это нужно сделать только один раз. В Subgraph Studio это можно сделать на Вашей [панели управления](https://thegraph.com/studio/), выбрав опцию "Создать субграф". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". -После того как субграф создан, его можно развернуть с помощью команды `graph deploy` в CLI: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # создает субграф на локальной Graph Node (в Subgraph Studio это делается через UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # загружает файлы сборки на указанную конечную точку IPFS, а затем разворачивает субграф на указанной Graph Node на основе хеша манифеста IPFS +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -Конфигурация ноды будет зависеть от того, где развертывается субграф. +The node configuration will depend on where the Subgraph is being deployed. ### Subgraph Studio @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Как только Ваш субграф будет развернут, он будет проиндексирован Graph Node. Вы можете проверить его прогресс, сделав запрос к самому субграфу: +Once your Subgraph has been deployed, it will be indexed by Graph Node. 
You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 Date: Tue, 25 Feb 2025 17:19:41 -0500 Subject: [PATCH 0639/1789] New translations near.mdx (Swedish) --- .../src/pages/sv/subgraphs/cookbook/near.mdx | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/sv/subgraphs/cookbook/near.mdx b/website/src/pages/sv/subgraphs/cookbook/near.mdx index 833a4b7c997d..a33c23a249c6 100644 --- a/website/src/pages/sv/subgraphs/cookbook/near.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: Bygger subgrafer på NEAR --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## Vad är NEAR? [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## Vad är NEAR subgrafer? +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR Subgraphs: - Blockhanterare: dessa körs på varje nytt block - Kvittohanterare: körs varje gång ett meddelande körs på ett angivet konto @@ -23,32 +23,32 @@ Subgraphs are event-based, which means that they listen for and then process onc ## Att bygga en NEAR Subgraf -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> Att bygga en NEAR subgraf är mycket lik att bygga en subgraf som indexerar Ethereum. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -Det finns tre aspekter av subgraf definition: +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. 
+**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. -Under subgrafutveckling finns det två nyckelkommandon: +During Subgraph development there are two key commands: ```bash $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### Definition av subgraf manifestet -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. @@ -92,7 +92,7 @@ NEAR datakällor stöder två typer av hanterare: ### Schema Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. 
There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript mappningar @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## Utplacera en NEAR Subgraf -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". -Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -Nodkonfigurationen beror på var subgrafen distribueras. +The node configuration will depend on where the Subgraph is being deployed. ### Subgraf Studion @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -När din subgraf har distribuerats kommer den att indexeras av Graph Node. 
Du kan kontrollera dess framsteg genom att fråga själva subgrafen: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ Vi kommer snart att ge mer information om hur du kör ovanstående komponenter. ## Fråga efter en NEAR subgraf -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Exempel på subgrafer -Here are some example subgraphs for reference: +Here are some example Subgraphs for reference: [NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -242,13 +242,13 @@ Here are some example subgraphs for reference: ### Hur fungerar betan? -NEAR stödet är i beta, vilket innebär att det kan bli ändringar i API:t när vi fortsätter att arbeta med att förbättra integrationen. Skicka ett e-postmeddelande till near@thegraph.com så att vi kan hjälpa dig att bygga NEAR subgrafer och hålla dig uppdaterad om den senaste utvecklingen! +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! -### Kan en subgraf indexera både NEAR och EVM kedjor? +### Can a Subgraph index both NEAR and EVM chains? -Nej, en subgraf kan bara stödja datakällor från en kedja/nätverk. +No, a Subgraph can only support data sources from one chain/network. -### Kan subgrafer reagera på mer specifika triggers? +### Can Subgraphs react to more specific triggers? För närvarande stöds endast blockerings- och kvittoutlösare. Vi undersöker utlösare för funktionsanrop till ett specificerat konto. Vi är också intresserade av att stödja eventutlösare, när NEAR har inbyggt eventsupport. @@ -262,21 +262,21 @@ accounts: - mintbase1.near ``` -### Kan NEAR subgrafer göra visningsanrop till NEAR konton under mappningar? +### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? Detta stöds inte. Vi utvärderar om denna funktionalitet krävs för indexering. -### Kan jag använda data källmallar i min NEAR subgraf? +### Can I use data source templates in my NEAR Subgraph? Detta stöds inte för närvarande. Vi utvärderar om denna funktionalitet krävs för indexering. -### Ethereum subgrafer stöder "väntande" och "nuvarande" versioner, hur kan jag distribuera en "väntande" version av en NEAR subgraf? +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? -Väntande funktionalitet stöds ännu inte för NEAR subgrafer. Under tiden kan du distribuera en ny version till en annan "namngiven" subgraf, och när den sedan synkroniseras med kedjehuvudet kan du distribuera om till din primära "namngivna" subgraf, som kommer att använda samma underliggande implementerings-ID, så huvudsubgrafen synkroniseras omedelbart. +Pending functionality is not yet supported for NEAR Subgraphs. 
In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. -### Min fråga har inte besvarats, var kan jag få mer hjälp med att bygga NEAR subgrafer? +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## Referenser From 28895f01143a21a066fa9bd30b2badd916d97d6a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:42 -0500 Subject: [PATCH 0640/1789] New translations near.mdx (Turkish) --- .../src/pages/tr/subgraphs/cookbook/near.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/tr/subgraphs/cookbook/near.mdx b/website/src/pages/tr/subgraphs/cookbook/near.mdx index 42ecff83f4f1..1cc353f1847c 100644 --- a/website/src/pages/tr/subgraphs/cookbook/near.mdx +++ b/website/src/pages/tr/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: NEAR Üzerinde Subgraphlar Oluşturma --- -Bu rehber, [NEAR blokzinciri](https://docs.near.org/) üzerindeki akıllı sözleşmeleri endeksleyen subgraph'ler inşa etmeye giriş niteliğindedir. +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## NEAR Nedir? [NEAR](https://near.org/), merkezi olmayan uygulamalar geliştirmek için kullanılan bir akıllı sözleşme platformudur. Daha fazla bilgi için [resmi dokümantasyona](https://docs.near.org/concepts/basics/protocol) bakabilirsiniz. -## NEAR subgraphları nedir? +## What are NEAR Subgraphs? -The Graph, geliştiricilere blokzinciri olaylarını işleyip, sonuçtaki veriyi bir GraphQL API'ı (subgraph olarak da bilinir) aracılığıyla kolayca erişilebilir kılacak araçlar sunar. [Graph Düğümü](https://github.com/graphprotocol/graph-node) artık NEAR olaylarını işleyebiliyor, bu da NEAR geliştiricilerinin akıllı sözleşmelerini endekslemek için subgraph'ler oluşturabileceği anlamına gelir. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraph'ler olay tabanlıdır., yani zincir üzerindeki olayları dinler ve sonrasında işlerler. Şu anda NEAR subgraph'leri için desteklenen iki tür işleyici bulunmaktadır: +Subgraphs are event-based, which means that they listen for and then process onchain events. 
There are currently two types of handlers supported for NEAR Subgraphs: - Blok işleyicileri: Bunlar her yeni blokta çalışır - Makbuz işleyicileri: Belirli bir hesapta her mesaj yürütüldüğünde çalışır @@ -23,32 +23,32 @@ Subgraph'ler olay tabanlıdır., yani zincir üzerindeki olayları dinler ve son ## NEAR Subgraph'ı Oluşturma -`@graphprotocol/graph-cli`, subgraph'ler oluşturmak ve dağıtmak için kullanılan bir komut satırı aracıdır. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts`, subgraph'e özgü türlerden oluşan bir kütüphanedir. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR ağında subgraph geliştirmek, `graph-cli`'nin `0.23.0` üstü sürümünü ve `graph-ts`'nin `0.23.0` üstü sürümünü gerektirir. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> Bir NEAR subgraph'ı oluşturmak, Ethereum'u indeksleyen bir subgraph oluşturmakla çok benzerdir. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -Subgraph tanımının üç yönü vardır: +There are three aspects of Subgraph definition: -**subgraph.yaml:** subgraph manifestosudur, ilgi duyulan veri kaynaklarını tanımlar ve bunların nasıl işleneceğini açıklar. NEAR, yeni bir `kind` (tür) veri kaynağıdır. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** subgraph'iniz için hangi verilerin depolanacağını ve bunların GraphQL kullanılarak nasıl sorgulanacağını tanımlayan şema dosyasıdır. NEAR subgraph'leri için gereksinimler [mevcut dokümantasyon](/developing/creating-a-subgraph/#the-graphql-schema) tarafından kapsanmaktadır. +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Eşlemeleri:** Olay verisini, şemanızda tanımlanan varlıklara dönüştüren [AssemblyScript kodudur](/subgraphs/developing/creating/graph-ts/api/). NEAR desteği, NEAR'a özgü veri türleri ve yeni JSON ayrıştırma işlevi sunar. -Subgraph geliştirme sırasında iki anahtar komut vardır: +During Subgraph development there are two key commands: ```bash -$ graph codegen # manifest'de tanımlanan şema dosyasından tipleri üretir -$ graph build # AssemblyScript dosyalarından Web Assembly oluşturur ve tüm subgraph dosyalarını bir /build klasöründe hazırlar +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### Subgraph Manifest Tanımı -Subgraph manifestosu (`subgraph.yaml`), subgraph için veri kaynaklarını, ilgili tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken fonksiyonları tanımlar. Aşağıda bir NEAR subgraph'i için örnek bir subgraph manifestosu bulunmaktadır: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. 
See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # Assemblyscript eşleştirmelerinin bulunduğu dosyaya bağlantı ``` -- NEAR subgraph'leri yeni bir veri kaynağı `kind`'ı (türü) sunar (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - `network`, subgraph'i sunan Graph Düğümü üzerindeki bir ağa karşılık gelmelidir. Subgraph Studio'da, NEAR'ın ana ağı `near-mainnet`, ve NEAR'ın test ağı `near-testnet`'tir - NEAR veri kaynakları, [NEAR hesabı](https://docs.near.org/concepts/protocol/account-model) ile ilişkili, insan tarafından okunabilir bir kimlik olan isteğe bağlı `source.account` alanını sunar. Bu, bir hesap veya alt hesap olabilir. - NEAR veri kaynakları, isteğe bağlı ek `source.accounts` alanını tanıtır. Bu alan isteğe bağlı sonekler ve önekler içerir. En azından bir önek veya sonek belirtilmelidir. Bu ekler ilgili listedeki değerlerle başlayan veya biten herhangi bir hesabı eşleştirirler. Aşağıdaki örnek şunlarla eşleşecektir: `[app|good].*[morning.near|morning.testnet]`. Sadece önekler veya sonekler listesi gerekiyorsa diğer alan atlanabilir. @@ -92,7 +92,7 @@ NEAR veri kaynakları iki tür işleyiciyi destekler: ### Şema Tanımı -Şema tanımı, ortaya çıkan subgraph veritabanının yapısını ve varlıklar arasındaki ilişkileri açıklar. Bu, orijinal veri kaynağından bağımsızdır. Subgraph şema tanımı hakkında daha fazla detay [burada](/developing/creating-a-subgraph/#the-graphql-schema) bulunabilir. +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript Eşlemeleri @@ -165,31 +165,31 @@ Bu türler blok & makbuz işleyicilerine aktarılır: - Blok işleyiciler bir `Block` alacaktır - Makbuz işleyiciler bir `ReceiptWithOutcome` alacaktır -Aksi takdirde, NEAR subgraph geliştiricileri eşleme yürütme sırasında [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/)'ının geri kalanını kullanabilir. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. Bu, yeni bir JSON ayrıştırma fonksiyonunu içerir - NEAR üzerindeki günlükler sıklıkla dizeleştirilmiş JSON olarak yayılır. Geliştiricilerin bu günlükleri kolayca işlemelerine olanak tanımak için [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) kapsamında yeni bir `json.fromString(...)` fonksiyonu mevcuttur. ## NEAR Subgraph'ını Dağıtma -Bir subgraph'i oluşturduktan sonra sıradaki adım bu subgraph'i endeksleme için Graph Düğümü'ne dağıtmaktır. NEAR subgraph'leri, herhangi bir Graph Düğümü `>=v0.26.x` sürümüne dağıtılabilir (bu sürüm henüz etiketlenmemiş ve yayımlanmamıştır). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). The Graph Ağı'ndaki Subgraph Studio ve yükseltme Endeksleyicisi şu anda beta olarak NEAR ana ağı ve test ağını endekslemeyi, aşağıdaki ağ isimleriyle desteklemektedir: - `near-mainnet` - `near-testnet` -Subgraph Studio'da subgraph'ler oluşturma ve dağıtma hakkında daha fazla bilgi [burada](/deploying/deploying-a-subgraph-to-studio/) bulunabilir. 
+More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -Kısa bir ön bilgi olarak - ilk adım subgraph'inizi "oluşturmak"tır - bu sadece bir kez yapılması gereken bir işlemdir. Subgraph Studio'da, [Gösterge Paneliniz](https://thegraph.com/studio/)'deki "Bir subgraph oluştur" kısmında yapılabilir. +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". -Subgraph oluşturulduktan sonra, `graph deploy` CLI komutunu kullanarak subgraph'inizi dağıtabilirsiniz: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # yerel bir Graph Düğümünde bir subgraph oluşturur (Subgraph Studio'da, bu işlem UI üzerinden yapılır) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # manifesto IPFS hash'ine göre belirtilen bir Graph Düğümü'ne subgraph'i dağıtır ve yapım dosyalarını belirtilen IPFS uç noktasına yükler +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -Düğüm yapılandırması, subgraph'ın nerede dağıtıldığına bağlı olacaktır. +The node configuration will depend on where the Subgraph is being deployed. ### Subgraph Stüdyosu @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Subgraph'ınız dağıtıldıktan sonra Graph Düğüme tarafından indekslenecektir. Subgraph'ın kendisini sorgulayarak ilerlemesini kontrol edebilirsiniz: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ Yukarıdaki bileşenlerin çalıştırılması hakkında yakında daha fazla bil ## NEAR Subgraph'ını Sorgulama -NEAR subgraph'leri için GraphQL uç noktası, mevcut API arayüzü ile şema tanımına göre belirlenir. Daha fazla bilgi için [GraphQL API dokümantasyonuna](/subgraphs/querying/graphql-api/) bakabilirsiniz. +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Örnek Subgraph'ler -Aşağıda bazı örnek subgraph'leri bulabilirsiniz: +Here are some example Subgraphs for reference: [NEAR Blokları](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -242,13 +242,13 @@ Aşağıda bazı örnek subgraph'leri bulabilirsiniz: ### Beta nasıl çalışır? -NEAR desteği beta aşamasındadır, bu da entegrasyonu geliştirmek için çalışmaya devam ederken API'de değişiklikler olabileceği anlamına gelir. NEAR subgraphları oluştururken size destek olabilmemiz ve en son gelişmelerden sizi haberdar edebilmemiz için lütfen near@thegraph.com adresine e-posta gönderin! +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! -### Bir subgraph hem NEAR hem de EVM zincirlerini indeksleyebilir mi? 
+### Can a Subgraph index both NEAR and EVM chains? -Hayır, bir subgraph yalnızca bir zincirden/ağdan veri kaynaklarını destekleyebilir. +No, a Subgraph can only support data sources from one chain/network. -### Subgraphlar daha spesifik tetikleyicilere tepki verebilir mi? +### Can Subgraphs react to more specific triggers? Şu anda yalnızca Blok ve Makbuz tetikleyicileri desteklenmektedir. Belirli bir hesaba yapılan fonksiyon çağrıları için tetikleyicileri araştırma aşamasındayız. NEAR yerel olay desteğine sahip oldu takdirde, olay tetikleyicilerini desteklemekle de ilgileneceğiz. @@ -262,21 +262,21 @@ accounts: - mintbase1.near ``` -### NEAR subgraphları eşleştirmeler sırasında NEAR hesaplarına görünüm çağrıları yapabilir mi? +### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? Bu desteklenmemektedir. Bu fonksiyonelliğin indeksleme için gerekli olup olmadığını değerlendiriyoruz. -### NEAR subgraph'ımda veri kaynağı şablonları kullanabilir miyim? +### Can I use data source templates in my NEAR Subgraph? Bu şu anda desteklenmemektedir. Bu fonksiyonelliğin indeksleme için gerekli olup olmadığını değerlendiriyoruz. -### Ethereum subgraphları "beklemedeki" ve "mevcut" sürümleri destekler, bir NEAR subgraph'ının "beklemedeki" sürümünü nasıl dağıtabilirim? +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? -Bekleme fonksiyonelliği henüz NEAR subgraphları için desteklenmemektedir. Bu arada, farklı "adlandırılmış" bir subgraph'a yeni bir sürüm dağıtabilir ve daha sonra bu zincir başı ile senkronize edildiğinde, aynı temel dağıtım ID'sini kullanacak olan birincil "adlandırılmış" subgraph'ınıza yeniden dağıtabilirsiniz. Böylece ana subgraph anında senkronize olur. +Pending functionality is not yet supported for NEAR Subgraphs. In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. -### Sorum yanıtlanmadı, NEAR subgraphları oluşturma konusunda nereden daha fazla yardım alabilirim? +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? -Eğer subgraph geliştirme ile ilgili genel bir soru ise, [Geliştirici dokümantasyonunun](/subgraphs/quick-start/) geri kalanında çok daha fazla bilgi bulunmaktadır. Eğer burada aradığınızı bulamazsanız lütfen [The Graph Protocol Discord](https://discord.gg/graphprotocol) sunucusuna katılın ve #near kanalında sorunuzu sorun. Veya near@thegraph.com adresine e-posta gönderin. +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. 
## Referanslar From e60ab8b474c4adb1bef42fc5a854951915d43f99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:43 -0500 Subject: [PATCH 0641/1789] New translations near.mdx (Ukrainian) --- .../src/pages/uk/subgraphs/cookbook/near.mdx | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/uk/subgraphs/cookbook/near.mdx b/website/src/pages/uk/subgraphs/cookbook/near.mdx index a94bd9531fdb..fd9f55109391 100644 --- a/website/src/pages/uk/subgraphs/cookbook/near.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: Building Subgraphs on NEAR --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## What is NEAR? [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## What are NEAR subgraphs? +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR Subgraphs: - Block handlers: these are run on every new block - Receipt handlers: run every time a message is executed at a specified account @@ -23,32 +23,32 @@ Subgraphs are event-based, which means that they listen for and then process onc ## Building a NEAR Subgraph -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> Building a NEAR subgraph is very similar to building a subgraph that indexes Ethereum. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -There are three aspects of subgraph definition: +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. 
+**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. -During subgraph development there are two key commands: +During Subgraph development there are two key commands: ```bash $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### Визначення маніфесту підграфів -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. @@ -92,7 +92,7 @@ NEAR data sources support two types of handlers: ### Визначення схеми -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. 
There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript Mappings @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## Deploying a NEAR Subgraph -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". -Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -The node configuration will depend on where the subgraph is being deployed. +The node configuration will depend on where the Subgraph is being deployed. ### Субграф Студія @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Once your subgraph has been deployed, it will be indexed by Graph Node. 
You can check its progress by querying the subgraph itself: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ We will provide more information on running the above components soon. ## Querying a NEAR Subgraph -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Приклади підграфів -Here are some example subgraphs for reference: +Here are some example Subgraphs for reference: [NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -242,13 +242,13 @@ Here are some example subgraphs for reference: ### How does the beta work? -NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR subgraphs, and keep you up to date on the latest developments! +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! -### Can a subgraph index both NEAR and EVM chains? +### Can a Subgraph index both NEAR and EVM chains? -No, a subgraph can only support data sources from one chain/network. +No, a Subgraph can only support data sources from one chain/network. -### Can subgraphs react to more specific triggers? +### Can Subgraphs react to more specific triggers? Currently, only Block and Receipt triggers are supported. We are investigating triggers for function calls to a specified account. We are also interested in supporting event triggers, once NEAR has native event support. @@ -262,21 +262,21 @@ accounts: - mintbase1.near ``` -### Can NEAR subgraphs make view calls to NEAR accounts during mappings? +### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? This is not supported. We are evaluating whether this functionality is required for indexing. -### Can I use data source templates in my NEAR subgraph? +### Can I use data source templates in my NEAR Subgraph? This is not currently supported. We are evaluating whether this functionality is required for indexing. -### Ethereum subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR subgraph? +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? -Pending functionality is not yet supported for NEAR subgraphs. In the interim, you can deploy a new version to a different "named" subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" subgraph, which will use the same underlying deployment ID, so the main subgraph will be instantly synced. +Pending functionality is not yet supported for NEAR Subgraphs. 
In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. -### My question hasn't been answered, where can I get more help building NEAR subgraphs? +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## References From 10484707bed2fbcdccab10451bc5519b142b0b04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:44 -0500 Subject: [PATCH 0642/1789] New translations near.mdx (Chinese Simplified) --- .../src/pages/zh/subgraphs/cookbook/near.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/near.mdx b/website/src/pages/zh/subgraphs/cookbook/near.mdx index 6bac46becff8..5a10f6407abe 100644 --- a/website/src/pages/zh/subgraphs/cookbook/near.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: 在 NEAR 上构建子图 --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## What is NEAR? [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## What are NEAR subgraphs? +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. 
There are currently two types of handlers supported for NEAR Subgraphs: - Block handlers: these are run on every new block - Receipt handlers: run every time a message is executed at a specified account @@ -23,32 +23,32 @@ Subgraphs are event-based, which means that they listen for and then process onc ## 构建 NEAR 子图 -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> 构建 NEAR 子图与构建索引以太坊的子图非常相似。 +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -子图定义包括三个方面: +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. -在子图开发过程中,有两个关键命令: +During Subgraph development there are two key commands: ```bash -$ graph codegen # 从清单中标识的模式文件生成类型 -$ graph build # 从 AssemblyScript 文件生成 Web Assembly,并在 /build 文件夹中准备所有子图文件 +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### 子图清单定义 -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. 
On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. @@ -92,7 +92,7 @@ NEAR data sources support two types of handlers: ### 模式定义 -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript 映射 @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## 部署 NEAR 子图 -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". 
-Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -节点配置将取决于子图的部署位置。 +The node configuration will depend on where the Subgraph is being deployed. ### 子图工作室 @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -部署子图后,它将由 Graph节点索引。 您可以通过查询子图本身来检查其进度: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 Date: Tue, 25 Feb 2025 17:19:45 -0500 Subject: [PATCH 0643/1789] New translations near.mdx (Urdu (Pakistan)) --- .../src/pages/ur/subgraphs/cookbook/near.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/ur/subgraphs/cookbook/near.mdx b/website/src/pages/ur/subgraphs/cookbook/near.mdx index 3ff637ebf556..f505b8c06283 100644 --- a/website/src/pages/ur/subgraphs/cookbook/near.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: سب گرافس کو NEAR پر بنانا --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## NEAR کیا ہے؟ [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## NEAR سب گراف کیا ہیں؟ +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. 
There are currently two types of handlers supported for NEAR Subgraphs: - بلاک ہینڈلرز: یہ ہر نۓ بلاک پر چلتے ہیں - ریسیپٹ ہینڈلرز: ہر بار جب کسی مخصوص اکاؤنٹ پر کوئی پیغام عمل میں آۓ تو چلتا ہے @@ -23,32 +23,32 @@ Subgraphs are event-based, which means that they listen for and then process onc ## NEAR سب گراف بنانا -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> NEAR سب گراف کی تعمیر ایک سب گراف بنانے کے مترادف ہے جو ایتھریم کو انڈیکس کرتا ہے. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -سب گراف کی تعریف کے تین پہلو ہیں: +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. -سب گراف کی ترقی کے دوران دو اہم کمانڈز ہیں: +During Subgraph development there are two key commands: ```bash -$graph codegen # ظاہر میں شناخت کردہ اسکیما فائل سے اقسام تیار کرتا ہے۔ -$graph build # اسمبلی سکرپٹ فائلوں سے ویب اسمبلی تیار کرتا ہے، اور تمام ذیلی گراف فائلوں کو /build فولڈر میں تیار کرتا ہے۔ +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### سب گراف مینی فیسٹ کی تعریف -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. 
See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. @@ -92,7 +92,7 @@ accounts: ### اسکیما کی تعریف -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### اسمبلی اسکرپٹ سب میپنک @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## NEAR سب گراف کی تعیناتی -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. 
On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". -Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -نوڈ کنفیگریشن کا انحصار اس بات پر ہوگا کہ سب گراف کہاں تعینات کیا جا رہا ہے. +The node configuration will depend on where the Subgraph is being deployed. ### سب گراف سٹوڈیو @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -ایک دفعہ آپ کا سب گراف تعینات ہو جاۓ، گراف نوڈ اسے انڈیکس کرے گا. آپ سب گراف سے ہی کیوری کرکے اس کی پیشرفت چیک کرسکتے ہیں: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 Date: Tue, 25 Feb 2025 17:19:47 -0500 Subject: [PATCH 0644/1789] New translations near.mdx (Vietnamese) --- .../src/pages/vi/subgraphs/cookbook/near.mdx | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/vi/subgraphs/cookbook/near.mdx b/website/src/pages/vi/subgraphs/cookbook/near.mdx index 6060eb27e761..698a0ac3486c 100644 --- a/website/src/pages/vi/subgraphs/cookbook/near.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: Building Subgraphs on NEAR --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## What is NEAR? [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## What are NEAR subgraphs? +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. 
[Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR Subgraphs: - Block handlers: these are run on every new block - Receipt handlers: run every time a message is executed at a specified account @@ -23,32 +23,32 @@ Subgraphs are event-based, which means that they listen for and then process onc ## Building a NEAR Subgraph -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> Building a NEAR subgraph is very similar to building a subgraph that indexes Ethereum. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -There are three aspects of subgraph definition: +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. -During subgraph development there are two key commands: +During Subgraph development there are two key commands: ```bash $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### Subgraph Manifest Definition -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. 
See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. @@ -92,7 +92,7 @@ NEAR data sources support two types of handlers: ### Schema Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript Mappings @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## Deploying a NEAR Subgraph -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). 
Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". -Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -The node configuration will depend on where the subgraph is being deployed. +The node configuration will depend on where the Subgraph is being deployed. ### Subgraph Studio @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Once your subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the subgraph itself: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ We will provide more information on running the above components soon. ## Querying a NEAR Subgraph -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Example Subgraphs -Here are some example subgraphs for reference: +Here are some example Subgraphs for reference: [NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -242,13 +242,13 @@ Here are some example subgraphs for reference: ### How does the beta work? -NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR subgraphs, and keep you up to date on the latest developments! 
+NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! -### Can a subgraph index both NEAR and EVM chains? +### Can a Subgraph index both NEAR and EVM chains? -No, a subgraph can only support data sources from one chain/network. +No, a Subgraph can only support data sources from one chain/network. -### Can subgraphs react to more specific triggers? +### Can Subgraphs react to more specific triggers? Currently, only Block and Receipt triggers are supported. We are investigating triggers for function calls to a specified account. We are also interested in supporting event triggers, once NEAR has native event support. @@ -262,21 +262,21 @@ accounts: - mintbase1.near ``` -### Can NEAR subgraphs make view calls to NEAR accounts during mappings? +### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? This is not supported. We are evaluating whether this functionality is required for indexing. -### Can I use data source templates in my NEAR subgraph? +### Can I use data source templates in my NEAR Subgraph? This is not currently supported. We are evaluating whether this functionality is required for indexing. -### Ethereum subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR subgraph? +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? -Pending functionality is not yet supported for NEAR subgraphs. In the interim, you can deploy a new version to a different "named" subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" subgraph, which will use the same underlying deployment ID, so the main subgraph will be instantly synced. +Pending functionality is not yet supported for NEAR Subgraphs. In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. -### My question hasn't been answered, where can I get more help building NEAR subgraphs? +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. 
## References From a573d0dff98569fbdf757fb5a36ca01eedd4dd3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:48 -0500 Subject: [PATCH 0645/1789] New translations near.mdx (Marathi) --- .../src/pages/mr/subgraphs/cookbook/near.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/mr/subgraphs/cookbook/near.mdx b/website/src/pages/mr/subgraphs/cookbook/near.mdx index 6e790fdcb0cf..ca1594912fca 100644 --- a/website/src/pages/mr/subgraphs/cookbook/near.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: NEAR वर सबग्राफ तयार करणे --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## जवळ म्हणजे काय? [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## NEAR subgraphs म्हणजे काय? +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR Subgraphs: - ब्लॉक हँडलर: हे प्रत्येक नवीन ब्लॉकवर चालवले जातात - पावती हँडलर्स: निर्दिष्ट खात्यावर संदेश कार्यान्वित झाल्यावर प्रत्येक वेळी चालवा @@ -23,32 +23,32 @@ Subgraphs are event-based, which means that they listen for and then process onc ## एक NEAR सबग्राफतयार करणे -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> NEAR सबग्राफ तयार करणे, याची प्रक्रिया इथेरियमवरील सबग्राफ तयार करण्याशी खूप सामान्यतेने सादर करते. +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -सबग्राफ व्याख्येचे तीन पैलू आहेत: +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. 
+**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. -सबग्राफ विकासादरम्यान दोन प्रमुख आज्ञा आहेत: +During Subgraph development there are two key commands: ```bash -$ graph codegen # मॅनिफेस्टमध्ये ओळखल्या गेलेल्या स्कीमा फाइलमधून प्रकार व्युत्पन्न करते -$ graph build # असेंबलीस्क्रिप्ट फायलींमधून वेब असेंब्ली तयार करते आणि /बिल्ड फोल्डरमध्ये सर्व सबग्राफ फाइल्स तयार करते +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### सबग्राफ मॅनिफेस्ट व्याख्या -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. @@ -92,7 +92,7 @@ accounts: ### स्कीमा व्याख्या -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. 
There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### असेंबलीस्क्रिप्ट मॅपिंग @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## NEAR सबग्राफ डिप्लॉय करण्यासाठी -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". -Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -नोड कॉन्फिगरेशन सबग्राफ कोठे तैनात केले जात आहे यावर अवलंबून असेल. +The node configuration will depend on where the Subgraph is being deployed. ### Subgraph Studio @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -एकदा तुमचा सबग्राफ तैनात केला गेला की, तो ग्राफ नोडद्वारे अनुक्रमित केला जाईल. 
तुम्ही सबग्राफवरच क्वेरी करून त्याची प्रगती तपासू शकता: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ NEAR ची अनुक्रमणिका देणारा आलेख ## NEAR सबग्राफची क्वेरी करणे -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## उदाहरणे सबग्राफ -Here are some example subgraphs for reference: +Here are some example Subgraphs for reference: [NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -242,13 +242,13 @@ Here are some example subgraphs for reference: ### बीटा कसे कार्य करते? -NEAR सपोर्ट बीटामध्ये आहे, याचा अर्थ असा की API मध्ये बदल होऊ शकतात कारण आम्ही एकत्रीकरण सुधारण्यासाठी काम करत आहोत. कृपया near@thegraph.com वर ईमेल करा जेणेकरुन आम्‍ही तुम्‍हाला जवळचे सबग्राफ तयार करण्‍यात मदत करू शकू आणि तुम्‍हाला नवीनतम घडामोडींबद्दल अद्ययावत ठेवू शकू! +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! -### सबग्राफ इंडेक्स NEAR आणि EVM दोन्ही चेन करू शकतो का? +### Can a Subgraph index both NEAR and EVM chains? -नाही, सबग्राफ केवळ एका साखळी/नेटवर्कमधील डेटा स्रोतांना समर्थन देऊ शकतो. +No, a Subgraph can only support data sources from one chain/network. -### सबग्राफ अधिक विशिष्ट ट्रिगरवर प्रतिक्रिया देऊ शकतात? +### Can Subgraphs react to more specific triggers? सध्या, फक्त ब्लॉक आणि पावती ट्रिगर समर्थित आहेत. आम्ही एका निर्दिष्ट खात्यावर फंक्शन कॉलसाठी ट्रिगर तपासत आहोत. आम्‍हाला इव्‍हेंट ट्रिगरला सपोर्ट करण्‍यात देखील रस आहे, एकदा NEAR ला नेटिव्ह इव्‍हेंट सपोर्ट असेल. @@ -262,21 +262,21 @@ accounts: - mintbase1.near ``` -### मॅपिंग दरम्यान NEAR subgraphs NEAR खात्यांना व्ह्यू कॉल करू शकतात? +### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? हे समर्थित नाही. अनुक्रमणिकेसाठी ही कार्यक्षमता आवश्यक आहे का याचे आम्ही मूल्यमापन करत आहोत. -### NEARのサブグラフでデータソーステンプレートを使用できますか? +### Can I use data source templates in my NEAR Subgraph? हे सध्या समर्थित नाही. अनुक्रमणिकेसाठी ही कार्यक्षमता आवश्यक आहे का याचे आम्ही मूल्यमापन करत आहोत. -### イーサリアムのサブグラフでは、「pending」および「current」のバージョンがサポートされていますが、NEARのサブグラフの「pending」バージョンをどのようにデプロイできるでしょうか? +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? -NEARサブグラフの「保留中」機能はまだサポートされていません。その間、異なる「名前付き」サブグラフに新しいバージョンをデプロイし、それがチェーンヘッドと同期された後、主要な「名前付き」サブグラフに再デプロイすることができます。この場合、同じ基礎となるデプロイメントIDを使用するため、メインのサブグラフは即座に同期されます. +Pending functionality is not yet supported for NEAR Subgraphs. In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. -### माझा प्रश्न उत्तर दिला नाही, NEAR सबग्राफ तयार करण्यासाठी अधिक मदत कुठे मिळेल? +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? 
-If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## संदर्भ From 293a96ca571966979258ef372853e0de0d6f15d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:49 -0500 Subject: [PATCH 0646/1789] New translations near.mdx (Hindi) --- .../src/pages/hi/subgraphs/cookbook/near.mdx | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/hi/subgraphs/cookbook/near.mdx b/website/src/pages/hi/subgraphs/cookbook/near.mdx index 6aab3eeedbb4..74930a75bc5e 100644 --- a/website/src/pages/hi/subgraphs/cookbook/near.mdx +++ b/website/src/pages/hi/subgraphs/cookbook/near.mdx @@ -2,17 +2,17 @@ title: NEAR पर सबग्राफ बनाना --- -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## NEAR क्या है? [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## NEAR सबग्राफ क्या हैं? +## What are NEAR Subgraphs? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR Subgraphs: - ब्लॉक हैंडलर्स: ये हर नए ब्लॉक पर चलते हैं - रसीद हैंडलर: किसी निर्दिष्ट खाते पर संदेश निष्पादित होने पर हर बार चलें @@ -23,32 +23,32 @@ Subgraphs are event-based, which means that they listen for and then process onc ## NEAR सबग्राफ बनाना -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. 
+NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. -> NEAR सबग्राफ का निर्माण वह सबग्राफ के निर्माण के समान है जो एथेरियम को अनुक्रमित करता है। +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. -सबग्राफ परिभाषा के तीन पहलू हैं: +There are three aspects of Subgraph definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. -सब ग्राफ को बनाते वक़्त दो मुख्य कमांड हैं: +During Subgraph development there are two key commands: ```bash $ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder ``` ### सब ग्राफ मैनिफेस्ट की परिभाषा -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: ```yaml specVersion: 0.0.2 @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) +- NEAR Subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. 
@@ -92,7 +92,7 @@ NEAR डेटा स्रोत दो प्रकार के हैंड ### स्कीमा की परिभाषा -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### असेंबली स्क्रिप्ट मैप्पिंग्स @@ -165,31 +165,31 @@ These types are passed to block & receipt handlers: - Block handlers will receive a `Block` - Receipt handlers will receive a `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution. This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## एक NEAR सबग्राफ की तैनाती -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". 
-Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash ``` -नोड कॉन्फ़िगरेशन इस बात पर निर्भर करेगा कि सबग्राफ को कहाँ तैनात किया जा रहा है। +The node configuration will depend on where the Subgraph is being deployed. ### Subgraph Studio @@ -204,7 +204,7 @@ graph deploy graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -एक बार आपका सबग्राफ तैनात हो जाने के बाद, इसे ग्राफ़ नोड द्वारा अनुक्रमित किया जाएगा। आप सबग्राफ को क्वेरी करके इसकी प्रगति की जांच कर सकते हैं: +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: ```graphql { @@ -228,11 +228,11 @@ NEAR को अनुक्रमित करने वाले ग्रा ## NEAR सबग्राफ को क्वेरी करना -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## सब-ग्राफ के उदाहरण -Here are some example subgraphs for reference: +Here are some example Subgraphs for reference: [NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -242,13 +242,13 @@ Here are some example subgraphs for reference: ### बीटा कैसे काम करता है? -NEAR समर्थन बीटा में है, जिसका मतलब है कि एपीआई में बदलाव हो सकते हैं क्योंकि हम इंटीग्रेशन में सुधार पर काम करना जारी रखेंगे। कृपया near@thegraph.com पर ईमेल करें ताकि हम NEAR सबग्राफ बनाने में आपकी सहायता कर सकें, और आपको नवीनतम विकासों के बारे में अपडेट रख सकें! +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! -### Can a subgraph index both NEAR and EVM chains? +### Can a Subgraph index both NEAR and EVM chains? -नहीं, एक सब-ग्राफ केवल एक चेन/नेटवर्क से डाटा सोर्स को सपोर्ट कर सकता है +No, a Subgraph can only support data sources from one chain/network. -### क्या सबग्राफ अधिक विशिष्ट ट्रिगर्स पर प्रतिक्रिया कर सकते हैं? +### Can Subgraphs react to more specific triggers? वर्तमान में, केवल अवरोधित करें और प्राप्त करें ट्रिगर समर्थित हैं। हम एक निर्दिष्ट खाते में फ़ंक्शन कॉल के लिए ट्रिगर्स की जांच कर रहे हैं। एक बार जब NEAR को नेटिव ईवेंट समर्थन मिल जाता है, तो हम ईवेंट ट्रिगर्स का समर्थन करने में भी रुचि रखते हैं। @@ -262,21 +262,21 @@ accounts: - mintbase1.near ``` -### क्या मैपिंग के दौरान NEAR सबग्राफ, NEAR खातों को व्यू कॉल कर सकते हैं? 
+### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? यह समर्थित नहीं है। हम मूल्यांकन कर रहे हैं कि अनुक्रमण के लिए यह कार्यक्षमता आवश्यक है या नहीं। -### क्या मैं अपने NEAR सबग्राफ में डेटा स्रोत टेम्प्लेट का उपयोग कर सकता हूँ? +### Can I use data source templates in my NEAR Subgraph? यह वर्तमान में समर्थित नहीं है। हम मूल्यांकन कर रहे हैं कि अनुक्रमण के लिए यह कार्यक्षमता आवश्यक है या नहीं। -### Ethereum subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR subgraph? +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? -NEAR सबग्राफ के लिए पेंडिंग कार्यक्षमता अभी तक समर्थित नहीं है। अंतरिम में, आप एक अलग "नामित" सबग्राफ के लिए एक नया संस्करण तैनात कर सकते हैं, और फिर जब वह चेन हेड के साथ सिंक हो जाता है, तो आप अपने प्राथमिक "नामित" सबग्राफ में फिर से तैनात कर सकते हैं, जो उसी अंतर्निहित डेप्लॉयमेंट आईडी का उपयोग करेगा, इसलिए मुख्य सबग्राफ तुरंत सिंक हो जाएगा। +Pending functionality is not yet supported for NEAR Subgraphs. In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. -### My question hasn't been answered, where can I get more help building NEAR subgraphs? +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## संदर्भ From 80fc7e72cd0c4cf4b645585698a33a557a2a4e9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:50 -0500 Subject: [PATCH 0647/1789] New translations near.mdx (Swahili) --- .../src/pages/sw/subgraphs/cookbook/near.mdx | 283 ++++++++++++++++++ 1 file changed, 283 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/cookbook/near.mdx diff --git a/website/src/pages/sw/subgraphs/cookbook/near.mdx b/website/src/pages/sw/subgraphs/cookbook/near.mdx new file mode 100644 index 000000000000..698a0ac3486c --- /dev/null +++ b/website/src/pages/sw/subgraphs/cookbook/near.mdx @@ -0,0 +1,283 @@ +--- +title: Building Subgraphs on NEAR +--- + +This guide is an introduction to building Subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). + +## What is NEAR? + +[NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. + +## What are NEAR Subgraphs? + +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a Subgraph. 
[Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build Subgraphs to index their smart contracts. + +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR Subgraphs: + +- Block handlers: these are run on every new block +- Receipt handlers: run every time a message is executed at a specified account + +[From the NEAR documentation](https://docs.near.org/build/data-infrastructure/lake-data-structures/receipt): + +> A Receipt is the only actionable object in the system. When we talk about "processing a transaction" on the NEAR platform, this eventually means "applying receipts" at some point. + +## Building a NEAR Subgraph + +`@graphprotocol/graph-cli` is a command-line tool for building and deploying Subgraphs. + +`@graphprotocol/graph-ts` is a library of Subgraph-specific types. + +NEAR Subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. + +> Building a NEAR Subgraph is very similar to building a Subgraph that indexes Ethereum. + +There are three aspects of Subgraph definition: + +**subgraph.yaml:** the Subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. + +**schema.graphql:** a schema file that defines what data is stored for your Subgraph, and how to query it via GraphQL. The requirements for NEAR Subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). + +**AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. + +During Subgraph development there are two key commands: + +```bash +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the Subgraph files in a /build folder +``` + +### Subgraph Manifest Definition + +The Subgraph manifest (`subgraph.yaml`) identifies the data sources for the Subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example Subgraph manifest for a NEAR Subgraph: + +```yaml +specVersion: 0.0.2 +schema: + file: ./src/schema.graphql # link to the schema file +dataSources: + - kind: near + network: near-mainnet + source: + account: app.good-morning.near # This data source will monitor this account + startBlock: 10662188 # Required for NEAR + mapping: + apiVersion: 0.0.5 + language: wasm/assemblyscript + blockHandlers: + - handler: handleNewBlock # the function name in the mapping file + receiptHandlers: + - handler: handleReceipt # the function name in the mapping file + file: ./src/mapping.ts # link to the file with the Assemblyscript mappings +``` + +- NEAR Subgraphs introduce a new `kind` of data source (`near`) +- The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` +- NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. 
+- NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least one prefix or suffix must be specified; they will match any account starting or ending with the listed values, respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary, the other field can be omitted.
+
+```yaml
+accounts:
+  prefixes:
+    - app
+    - good
+  suffixes:
+    - morning.near
+    - morning.testnet
+```
+
+NEAR data sources support two types of handlers:
+
+- `blockHandlers`: run on every new NEAR block. No `source.account` is required.
+- `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/tutorials/crosswords/basics/add-functions-call#create-a-subaccount) must be added as independent data sources).
+
+### Schema Definition
+
+Schema definition describes the structure of the resulting Subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on Subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema).
+
+### AssemblyScript Mappings
+
+The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/).
+
+NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/).
+
+```typescript
+
+class ExecutionOutcome {
+      gasBurnt: u64,
+      blockHash: Bytes,
+      id: Bytes,
+      logs: Array<string>,
+      receiptIds: Array<Bytes>,
+      tokensBurnt: BigInt,
+      executorId: string,
+  }
+
+class ActionReceipt {
+      predecessorId: string,
+      receiverId: string,
+      id: CryptoHash,
+      signerId: string,
+      gasPrice: BigInt,
+      outputDataReceivers: Array<DataReceiver>,
+      inputDataIds: Array<CryptoHash>,
+      actions: Array<ActionValue>,
+  }
+
+class BlockHeader {
+      height: u64,
+      prevHeight: u64, // Always zero when version < V3
+      epochId: Bytes,
+      nextEpochId: Bytes,
+      chunksIncluded: u64,
+      hash: Bytes,
+      prevHash: Bytes,
+      timestampNanosec: u64,
+      randomValue: Bytes,
+      gasPrice: BigInt,
+      totalSupply: BigInt,
+      latestProtocolVersion: u32,
+  }
+
+class ChunkHeader {
+      gasUsed: u64,
+      gasLimit: u64,
+      shardId: u64,
+      chunkHash: Bytes,
+      prevBlockHash: Bytes,
+      balanceBurnt: BigInt,
+  }
+
+class Block {
+      author: string,
+      header: BlockHeader,
+      chunks: Array<ChunkHeader>,
+  }
+
+class ReceiptWithOutcome {
+      outcome: ExecutionOutcome,
+      receipt: ActionReceipt,
+      block: Block,
+  }
+```
+
+These types are passed to block & receipt handlers:
+
+- Block handlers will receive a `Block`
+- Receipt handlers will receive a `ReceiptWithOutcome`
+
+Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR Subgraph developers during mapping execution.
+
+This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs.
+
+## Deploying a NEAR Subgraph
+
+Once you have a built Subgraph, it is time to deploy it to Graph Node for indexing. NEAR Subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released).
+ +Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: + +- `near-mainnet` +- `near-testnet` + +More information on creating and deploying Subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). + +As a quick primer - the first step is to "create" your Subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a Subgraph". + +Once your Subgraph has been created, you can deploy your Subgraph by using the `graph deploy` CLI command: + +```sh +$ graph create --node # creates a Subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the Subgraph to a specified Graph Node based on the manifest IPFS hash +``` + +The node configuration will depend on where the Subgraph is being deployed. + +### Subgraph Studio + +```sh +graph auth +graph deploy +``` + +### Local Graph Node (based on default configuration) + +```sh +graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 +``` + +Once your Subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the Subgraph itself: + +```graphql +{ + _meta { + block { + number + } + } +} +``` + +### Indexing NEAR with a Local Graph Node + +Running a Graph Node that indexes NEAR has the following operational requirements: + +- NEAR Indexer Framework with Firehose instrumentation +- NEAR Firehose Component(s) +- Graph Node with Firehose endpoint configured + +We will provide more information on running the above components soon. + +## Querying a NEAR Subgraph + +The GraphQL endpoint for NEAR Subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. + +## Example Subgraphs + +Here are some example Subgraphs for reference: + +[NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) + +[NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) + +## FAQ + +### How does the beta work? + +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR Subgraphs, and keep you up to date on the latest developments! + +### Can a Subgraph index both NEAR and EVM chains? + +No, a Subgraph can only support data sources from one chain/network. + +### Can Subgraphs react to more specific triggers? + +Currently, only Block and Receipt triggers are supported. We are investigating triggers for function calls to a specified account. We are also interested in supporting event triggers, once NEAR has native event support. + +### Will receipt handlers trigger for accounts and their sub-accounts? + +If an `account` is specified, that will only match the exact account name. It is possible to match sub-accounts by specifying an `accounts` field, with `suffixes` and `prefixes` specified to match accounts and sub-accounts, for example the following would match all `mintbase1.near` sub-accounts: + +```yaml +accounts: + suffixes: + - mintbase1.near +``` + +### Can NEAR Subgraphs make view calls to NEAR accounts during mappings? 
+ +This is not supported. We are evaluating whether this functionality is required for indexing. + +### Can I use data source templates in my NEAR Subgraph? + +This is not currently supported. We are evaluating whether this functionality is required for indexing. + +### Ethereum Subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR Subgraph? + +Pending functionality is not yet supported for NEAR Subgraphs. In the interim, you can deploy a new version to a different "named" Subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" Subgraph, which will use the same underlying deployment ID, so the main Subgraph will be instantly synced. + +### My question hasn't been answered, where can I get more help building NEAR Subgraphs? + +If it is a general question about Subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. + +## References + +- [NEAR developer documentation](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) From 3a8ce603c7e1012fa9aa720aba1663b3c4928963 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:51 -0500 Subject: [PATCH 0648/1789] New translations secure-api-keys-nextjs.mdx (Romanian) --- .../pages/ro/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ro/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/ro/subgraphs/cookbook/secure-api-keys-nextjs.mdx index fc7e0ff52eb4..e17e594408ff 100644 --- a/website/src/pages/ro/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/ro/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: How to Secure API Keys Using Next.js Server Components ## Overview -We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Caveats @@ -18,7 +18,7 @@ In this cookbook, we will go over how to create a Next.js server component that In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. 
Next.js Server Components address this issue by handling sensitive operations server-side. -### Using client-side rendering to query a subgraph +### Using client-side rendering to query a Subgraph ![Client-side rendering](/img/api-key-client-side-rendering.png) From 3d4f24566f79e7011bcd525034d52cbb51186d46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:52 -0500 Subject: [PATCH 0649/1789] New translations secure-api-keys-nextjs.mdx (French) --- .../fr/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/fr/subgraphs/cookbook/secure-api-keys-nextjs.mdx index cd3b3b46b7f9..928e76cf6a57 100644 --- a/website/src/pages/fr/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: Comment sécuriser les clés d'API en utilisant les composants serveur de ## Aperçu -Nous pouvons utiliser [les composants serveur de Next.js](https://nextjs.org/docs/app/building-your-application/rendering/server-components) pour sécuriser correctement notre clé API contre l'exposition dans le frontend de notre dapp. Pour augmenter encore la sécurité de notre clé API, nous pouvons également [restreindre notre clé API à certains subgraphs ou domaines dans Subgraph Studio.](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -Dans ce guide pratique, nous allons passer en revue la création d'un composant de serveur Next.js qui interroge un subgraph tout en masquant la clé API du frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Mise en garde @@ -18,7 +18,7 @@ Dans ce guide pratique, nous allons passer en revue la création d'un composant Dans une application React standard, les clés API incluses dans le code frontend peuvent être exposées du côté client, posant un risque de sécurité. Bien que les fichiers `.env` soient couramment utilisés, ils ne protègent pas complètement les clés car le code de React est exécuté côté client, exposant ainsi la clé API dans les headers. Les composants serveur Next.js résolvent ce problème en gérant les opérations sensibles côté serveur. -### Utilisation du rendu côté client pour interroger un subgraph +### Using client-side rendering to query a Subgraph ![rendu côté client](/img/api-key-client-side-rendering.png) @@ -120,4 +120,4 @@ Démarrez notre application Next.js en utilisant `npm run dev`. Vérifiez que le ### Conclusion -By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further. 
+En utilisant les composants serveur de Next.js, nous avons effectivement caché la clé API du côté client, améliorant ainsi la sécurité de notre application. Cette méthode garantit que les opérations sensibles sont traitées côté serveur, à l'abri des vulnérabilités potentielles côté client. Enfin, n'oubliez pas d'explorer [d'autres mesures de sécurité des clés d'API](/subgraphs/querying/managing-api-keys/) pour renforcer encore davantage la sécurité de vos clés d'API. From ed02471d75736f39f1db4f64afeeb1dc945c36a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:53 -0500 Subject: [PATCH 0650/1789] New translations secure-api-keys-nextjs.mdx (Spanish) --- .../pages/es/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/es/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/es/subgraphs/cookbook/secure-api-keys-nextjs.mdx index 07b297aff006..f6b5193787c9 100644 --- a/website/src/pages/es/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/es/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: How to Secure API Keys Using Next.js Server Components ## Descripción -We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Caveats @@ -18,7 +18,7 @@ In this cookbook, we will go over how to create a Next.js server component that In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side. 
-### Using client-side rendering to query a subgraph +### Using client-side rendering to query a Subgraph ![Client-side rendering](/img/api-key-client-side-rendering.png) From c7b367380826f5fdcd5f6b88f6ca15939016e0f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:54 -0500 Subject: [PATCH 0651/1789] New translations secure-api-keys-nextjs.mdx (Arabic) --- .../pages/ar/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ar/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/ar/subgraphs/cookbook/secure-api-keys-nextjs.mdx index 485f597e25ba..21ac0b74d31d 100644 --- a/website/src/pages/ar/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: How to Secure API Keys Using Next.js Server Components ## نظره عامة -We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Caveats @@ -18,7 +18,7 @@ In this cookbook, we will go over how to create a Next.js server component that In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side. 
-### Using client-side rendering to query a subgraph +### Using client-side rendering to query a Subgraph ![Client-side rendering](/img/api-key-client-side-rendering.png) From 12f166de6fda6957ba8994eaf69cfc5b7e6a59c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:55 -0500 Subject: [PATCH 0652/1789] New translations secure-api-keys-nextjs.mdx (Czech) --- .../pages/cs/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/cs/subgraphs/cookbook/secure-api-keys-nextjs.mdx index de502a0ed526..d311cfa5117e 100644 --- a/website/src/pages/cs/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: Jak zabezpečit klíče API pomocí komponent serveru Next.js ## Přehled -K řádnému zabezpečení našeho klíče API před odhalením ve frontendu naší aplikace můžeme použít [komponenty serveru Next.js](https://nextjs.org/docs/app/building-your-application/rendering/server-components). Pro další zvýšení zabezpečení našeho klíče API můžeme také [omezit náš klíč API na určité podgrafy nebo domény v Podgraf Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -V této kuchařce probereme, jak vytvořit serverovou komponentu Next.js, která se dotazuje na podgraf a zároveň skrývá klíč API před frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Upozornění @@ -18,7 +18,7 @@ V této kuchařce probereme, jak vytvořit serverovou komponentu Next.js, která Ve standardní aplikaci React mohou být klíče API obsažené v kódu frontendu vystaveny na straně klienta, což představuje bezpečnostní riziko. Soubory `.env` se sice běžně používají, ale plně klíče nechrání, protože kód Reactu se spouští na straně klienta a vystavuje klíč API v hlavičkách. Serverové komponenty Next.js tento problém řeší tím, že citlivé operace zpracovávají na straně serveru. 
-### Použití vykreslování na straně klienta k dotazování podgrafu +### Using client-side rendering to query a Subgraph ![Client-side rendering](/img/api-key-client-side-rendering.png) From 655a0fd04829d670815e4a212d775b5c9263cb44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:56 -0500 Subject: [PATCH 0653/1789] New translations secure-api-keys-nextjs.mdx (German) --- .../pages/de/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/de/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/de/subgraphs/cookbook/secure-api-keys-nextjs.mdx index 4122439152b8..5ee0f292e2c7 100644 --- a/website/src/pages/de/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/de/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: How to Secure API Keys Using Next.js Server Components ## Überblick -We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Caveats @@ -18,7 +18,7 @@ In this cookbook, we will go over how to create a Next.js server component that In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side. 
-### Using client-side rendering to query a subgraph +### Using client-side rendering to query a Subgraph ![Client-side rendering](/img/api-key-client-side-rendering.png) From a6e2995574eb039b186e0a204120b18a40133c32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:57 -0500 Subject: [PATCH 0654/1789] New translations secure-api-keys-nextjs.mdx (Italian) --- .../pages/it/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/it/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/it/subgraphs/cookbook/secure-api-keys-nextjs.mdx index fba106e6eaf6..b247912c90e6 100644 --- a/website/src/pages/it/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/it/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: How to Secure API Keys Using Next.js Server Components ## Panoramica -We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Caveats @@ -18,7 +18,7 @@ In this cookbook, we will go over how to create a Next.js server component that In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side. 
-### Using client-side rendering to query a subgraph +### Using client-side rendering to query a Subgraph ![Client-side rendering](/img/api-key-client-side-rendering.png) From e5462fda61f3b479ef857928fc42dce8ae13e6fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:58 -0500 Subject: [PATCH 0655/1789] New translations secure-api-keys-nextjs.mdx (Japanese) --- .../pages/ja/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ja/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/ja/subgraphs/cookbook/secure-api-keys-nextjs.mdx index bac42648b0fc..ead239aa93e1 100644 --- a/website/src/pages/ja/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: How to Secure API Keys Using Next.js Server Components ## 概要 -We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Caveats @@ -18,7 +18,7 @@ In this cookbook, we will go over how to create a Next.js server component that In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side. 
-### Using client-side rendering to query a subgraph +### Using client-side rendering to query a Subgraph ![Client-side rendering](/img/api-key-client-side-rendering.png) From 242bfece51d03da7730863793c3044f9c5b15304 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:58 -0500 Subject: [PATCH 0656/1789] New translations secure-api-keys-nextjs.mdx (Korean) --- .../pages/ko/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ko/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/ko/subgraphs/cookbook/secure-api-keys-nextjs.mdx index fc7e0ff52eb4..e17e594408ff 100644 --- a/website/src/pages/ko/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/ko/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: How to Secure API Keys Using Next.js Server Components ## Overview -We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Caveats @@ -18,7 +18,7 @@ In this cookbook, we will go over how to create a Next.js server component that In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side. 
-### Using client-side rendering to query a subgraph +### Using client-side rendering to query a Subgraph ![Client-side rendering](/img/api-key-client-side-rendering.png) From 7b5a394497e2a85ce98303560e4bf868c6965441 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:19:59 -0500 Subject: [PATCH 0657/1789] New translations secure-api-keys-nextjs.mdx (Dutch) --- .../pages/nl/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/nl/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/nl/subgraphs/cookbook/secure-api-keys-nextjs.mdx index fc7e0ff52eb4..e17e594408ff 100644 --- a/website/src/pages/nl/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/nl/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: How to Secure API Keys Using Next.js Server Components ## Overview -We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Caveats @@ -18,7 +18,7 @@ In this cookbook, we will go over how to create a Next.js server component that In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side. 
-### Using client-side rendering to query a subgraph +### Using client-side rendering to query a Subgraph ![Client-side rendering](/img/api-key-client-side-rendering.png) From 6a90163f8526638d4699e43190e43568332a5904 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:00 -0500 Subject: [PATCH 0658/1789] New translations secure-api-keys-nextjs.mdx (Polish) --- .../pages/pl/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/pl/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/pl/subgraphs/cookbook/secure-api-keys-nextjs.mdx index fc7e0ff52eb4..e17e594408ff 100644 --- a/website/src/pages/pl/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/pl/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: How to Secure API Keys Using Next.js Server Components ## Overview -We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Caveats @@ -18,7 +18,7 @@ In this cookbook, we will go over how to create a Next.js server component that In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side. 
-### Using client-side rendering to query a subgraph +### Using client-side rendering to query a Subgraph ![Client-side rendering](/img/api-key-client-side-rendering.png) From 45fce48cd9c233ff7cc7f697e04bc1d4420b81a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:01 -0500 Subject: [PATCH 0659/1789] New translations secure-api-keys-nextjs.mdx (Portuguese) --- .../pages/pt/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/pt/subgraphs/cookbook/secure-api-keys-nextjs.mdx index 768ee1418880..c6fcc17c7caa 100644 --- a/website/src/pages/pt/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: Como Proteger Chaves de API com Componentes do Servidor Next.js ## Visão geral -Podemos proteger a nossa chave API no frontend do nosso dApp com [componentes do servidor Next.js](https://nextjs.org/docs/app/building-your-application/rendering/server-components). Para ainda mais segurança, também podemos [restringir a nossa chave API a certos domínios ou subgraphs no Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -Neste manual, veremos como criar um componente de servidor Next.js que faz queries em um subgraph enquanto esconde a chave API do frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Porém... @@ -18,7 +18,7 @@ Neste manual, veremos como criar um componente de servidor Next.js que faz queri Num aplicativo de React normal, chaves API incluídas no código do frontend podem ser expostas ao lado do cliente, o que apresenta um risco de segurança. É normal o uso de arquivos `.env`, mas estes não protegem as chaves por completo, já que o código do React é executado no lado do cliente (client-side), o que expõe a chave API nos headers. Os componentes do servidor Next.js abordam este problema via a execução de operações sensíveis server-side. 
-### Como usar renderização client-side para fazer queries em um subgraph +### Using client-side rendering to query a Subgraph ![Renderização client-side](/img/api-key-client-side-rendering.png) From a90b7d3dcdf28ed8a0adc10cdc958167c7eceb53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:02 -0500 Subject: [PATCH 0660/1789] New translations secure-api-keys-nextjs.mdx (Russian) --- .../pages/ru/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/ru/subgraphs/cookbook/secure-api-keys-nextjs.mdx index 963188b67823..02d4c64db86e 100644 --- a/website/src/pages/ru/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: Как обезопасить API-ключи с использовани ## Обзор -Мы можем использовать [серверные компоненты Next.js](https://nextjs.org/docs/app/building-your-application/rendering/server-components), чтобы должным образом защитить наш API-ключ от взлома во внешнем интерфейсе нашего децентрализованного приложения (dapp). Чтобы дополнительно повысить безопасность API-ключа, мы также можем [ограничить доступ к нашему API-ключу для определённых субграфов или доменов в Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -В этом руководстве мы рассмотрим, как создать серверный компонент Next.js, который запрашивает субграф, одновременно скрывая API-ключ от фронтенда. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Предостережения @@ -18,7 +18,7 @@ title: Как обезопасить API-ключи с использовани В стандартном React-приложении API-ключи, включённые в код внешнего интерфейса, могут быть раскрыты на стороне клиента, что созает угрозу безопасности. Хотя обычно используются файлы `.env`, они не обеспечивают полной защиты ключей, так как код React выполняется на стороне клиента, раскрывая API-ключ в заголовках. Серверные компоненты Next.js решают эту проблему, обрабатывая конфиденциальные операции на сервере. 
-### Использование рендеринга на клиентской стороне для запроса к субграфу +### Using client-side rendering to query a Subgraph ![Рендеринг на клиентской стороне](/img/api-key-client-side-rendering.png) From 7854c1c4d6c89f4a13cda94c2a8e3f30b459d96b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:03 -0500 Subject: [PATCH 0661/1789] New translations secure-api-keys-nextjs.mdx (Swedish) --- .../pages/sv/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/sv/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/sv/subgraphs/cookbook/secure-api-keys-nextjs.mdx index a9e82a6baa72..f90b30ccdd8c 100644 --- a/website/src/pages/sv/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: How to Secure API Keys Using Next.js Server Components ## Översikt -We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Caveats @@ -18,7 +18,7 @@ In this cookbook, we will go over how to create a Next.js server component that In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side. 
-### Using client-side rendering to query a subgraph +### Using client-side rendering to query a Subgraph ![Client-side rendering](/img/api-key-client-side-rendering.png) From c0dee7530e371daab6adec47b7ff63c9060b4abd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:04 -0500 Subject: [PATCH 0662/1789] New translations secure-api-keys-nextjs.mdx (Turkish) --- .../pages/tr/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/tr/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/tr/subgraphs/cookbook/secure-api-keys-nextjs.mdx index 50a3741afa22..b93a81626d72 100644 --- a/website/src/pages/tr/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/tr/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: Next.js Sunucu Bileşenlerini Kullanarak API Anahtarları Nasıl Güvenli ## Genel Bakış -API anahtarımızı dapp'imizin ön yüzünde açığa çıkmasını düzgün bir şekilde engellemek için [Next.js sunucu bileşenlerini](https://nextjs.org/docs/app/building-your-application/rendering/server-components) kullanabiliriz. API anahtarımızın güvenliğini daha da artırmak için, ayrıca [API anahtarımızı belirli subgraph'lar veya Subgraph Studio'daki alanlarla sınırlandırabiliriz](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -Bu talimatlarda, bir subgraph'i sorgularken aynı zamanda API anahtarını ön yüzden gizleyen bir Next.js sunucu bileşeni oluşturmayı ele alacağız. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Kısıtlamalar @@ -18,7 +18,7 @@ Bu talimatlarda, bir subgraph'i sorgularken aynı zamanda API anahtarını ön y Standart bir React uygulamasında, ön yüz koduna dahil edilen API anahtarları istemci tarafında açığa çıkabilir ve güvenlik riski oluşturabilir. `.env` dosyaları yaygın olarak kullanılsa da React kodu istemci tarafında çalıştığı için anahtarları tam olarak korumazlar ve API anahtarı başlıklarda açığa çıkar. Next.js Sunucu Bileşenleri bu sorunu, hassas işlemleri sunucu tarafında yürüterek çözer. 
-### Bir subgraph'i sorgulamak için istemci tarafında işleme (render) +### Using client-side rendering to query a Subgraph ![İstemci tarafında işleme](/img/api-key-client-side-rendering.png) From 97d797869024ba28ed21488d75cd5d28057fe691 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:05 -0500 Subject: [PATCH 0663/1789] New translations secure-api-keys-nextjs.mdx (Ukrainian) --- .../pages/uk/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/uk/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/uk/subgraphs/cookbook/secure-api-keys-nextjs.mdx index fc7e0ff52eb4..e17e594408ff 100644 --- a/website/src/pages/uk/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: How to Secure API Keys Using Next.js Server Components ## Overview -We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Caveats @@ -18,7 +18,7 @@ In this cookbook, we will go over how to create a Next.js server component that In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side. 
-### Using client-side rendering to query a subgraph +### Using client-side rendering to query a Subgraph ![Client-side rendering](/img/api-key-client-side-rendering.png) From a823887ffb890d84e41a59d9a1388067cee103e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:06 -0500 Subject: [PATCH 0664/1789] New translations secure-api-keys-nextjs.mdx (Chinese Simplified) --- .../zh/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/zh/subgraphs/cookbook/secure-api-keys-nextjs.mdx index ae2201109356..e17e594408ff 100644 --- a/website/src/pages/zh/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -2,11 +2,11 @@ title: How to Secure API Keys Using Next.js Server Components --- -## 概述 +## Overview -We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Caveats @@ -18,7 +18,7 @@ In this cookbook, we will go over how to create a Next.js server component that In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side. 
-### Using client-side rendering to query a subgraph +### Using client-side rendering to query a Subgraph ![Client-side rendering](/img/api-key-client-side-rendering.png) From 7d8f8702eb14f909d3d6a8bae291eb661717ced1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:06 -0500 Subject: [PATCH 0665/1789] New translations secure-api-keys-nextjs.mdx (Urdu (Pakistan)) --- .../pages/ur/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ur/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/ur/subgraphs/cookbook/secure-api-keys-nextjs.mdx index 132c4cd07884..2ef4a2ac97fd 100644 --- a/website/src/pages/ur/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: How to Secure API Keys Using Next.js Server Components ## جائزہ -We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Caveats @@ -18,7 +18,7 @@ In this cookbook, we will go over how to create a Next.js server component that In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side. 
-### Using client-side rendering to query a subgraph +### Using client-side rendering to query a Subgraph ![Client-side rendering](/img/api-key-client-side-rendering.png) From 1583c57842483d06365ae015dd10b45a3a6dd380 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:07 -0500 Subject: [PATCH 0666/1789] New translations secure-api-keys-nextjs.mdx (Vietnamese) --- .../pages/vi/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/vi/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/vi/subgraphs/cookbook/secure-api-keys-nextjs.mdx index e83414fea5e5..4940d09d815a 100644 --- a/website/src/pages/vi/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: How to Secure API Keys Using Next.js Server Components ## Tổng quan -We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Caveats @@ -18,7 +18,7 @@ In this cookbook, we will go over how to create a Next.js server component that In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side. 
-### Using client-side rendering to query a subgraph +### Using client-side rendering to query a Subgraph ![Client-side rendering](/img/api-key-client-side-rendering.png) From f42c7e66e4c1ba3371b78619c5f68909f85ca4f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:08 -0500 Subject: [PATCH 0667/1789] New translations secure-api-keys-nextjs.mdx (Marathi) --- .../pages/mr/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/mr/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/mr/subgraphs/cookbook/secure-api-keys-nextjs.mdx index d5ff1b146dfd..b6b043fa29f1 100644 --- a/website/src/pages/mr/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: How to Secure API Keys Using Next.js Server Components ## सविश्लेषण -We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend. +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### Caveats @@ -18,7 +18,7 @@ In this cookbook, we will go over how to create a Next.js server component that In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side. 
-### Using client-side rendering to query a subgraph +### Using client-side rendering to query a Subgraph ![Client-side rendering](/img/api-key-client-side-rendering.png) From 4d1fe8a67f8973ed6bb8183820a225a9d81b3a97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:09 -0500 Subject: [PATCH 0668/1789] New translations secure-api-keys-nextjs.mdx (Hindi) --- .../hi/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/hi/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/hi/subgraphs/cookbook/secure-api-keys-nextjs.mdx index 4e690b3b4f7e..92208080848c 100644 --- a/website/src/pages/hi/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/hi/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -4,9 +4,9 @@ title: कैसे सुरक्षित करें API Keys का उप ## अवलोकन -हम Next.js server components(https://nextjs.org/docs/app/building-your-application/rendering/server-components) का उपयोग करके अपने dapp के frontend में API key को exposure से सुरक्षित रख सकते हैं। API key की सुरक्षा को और बढ़ाने के लिए, हम Subgraph Studio में अपनी API key को कुछ subgraphs या domains तक सीमित कर सकते हैं(/cookbook/upgrading-a-subgraph/#securing-your-api-key) +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). -इस cookbook में, हम यह समझेंगे कि कैसे एक Next.js server component बनाया जाए जो subgraph से query करता है, साथ ही API key को frontend से छिपाने का तरीका भी शामिल है। +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. ### चेतावनी @@ -18,13 +18,13 @@ title: कैसे सुरक्षित करें API Keys का उप एक मानक React एप्लिकेशन में, फ्रंटेंड कोड में शामिल API कुंजियाँ क्लाइंट-साइड पर उजागर हो सकती हैं, जिससे सुरक्षा का जोखिम बढ़ता है। जबकि.env फ़ाइलें सामान्यत: उपयोग की जाती हैं, ये कुंजियों की पूरी सुरक्षा नहीं करतीं क्योंकि React का कोड क्लाइंट साइड पर निष्पादित होता है, जो API कुंजी को हेडर में उजागर करता है। Next.js सर्वर घटक इस मुद्दे का समाधान करते हैं द्वारा संवेदनशील कार्यों को सर्वर-साइड पर संभालना। -### क्लाइंट-साइड रेंडरिंग का उपयोग करके एक subgraph को क्वेरी करना +### Using client-side rendering to query a Subgraph ![Client-side rendering](/img/api-key-client-side-rendering.png) ### Prerequisites -- [Subgraph Studio](https://thegraph.com/studio) से एक API कुंजी +- [Subgraph Studio](https://thegraph.com/studio) से एक API कुंजी - Next.js और React का बुनियादी ज्ञान - एक मौजूदा Next.js प्रोजेक्ट जो App Router (https://nextjs.org/docs/app). 
का उपयोग करता है। From c308bd055558218fd164e0ffee5d3890ccec83f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:10 -0500 Subject: [PATCH 0669/1789] New translations secure-api-keys-nextjs.mdx (Swahili) --- .../cookbook/secure-api-keys-nextjs.mdx | 123 ++++++++++++++++++ 1 file changed, 123 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/cookbook/secure-api-keys-nextjs.mdx diff --git a/website/src/pages/sw/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/sw/subgraphs/cookbook/secure-api-keys-nextjs.mdx new file mode 100644 index 000000000000..e17e594408ff --- /dev/null +++ b/website/src/pages/sw/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -0,0 +1,123 @@ +--- +title: How to Secure API Keys Using Next.js Server Components +--- + +## Overview + +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain Subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). + +In this cookbook, we will go over how to create a Next.js server component that queries a Subgraph while also hiding the API key from the frontend. + +### Caveats + +- Next.js server components do not protect API keys from being drained using denial of service attacks. +- The Graph Network gateways have denial of service detection and mitigation strategies in place, however using server components may weaken these protections. +- Next.js server components introduce centralization risks as the server can go down. + +### Why It's Needed + +In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side. + +### Using client-side rendering to query a Subgraph + +![Client-side rendering](/img/api-key-client-side-rendering.png) + +### Prerequisites + +- An API key from [Subgraph Studio](https://thegraph.com/studio) +- Basic knowledge of Next.js and React. +- An existing Next.js project that uses the [App Router](https://nextjs.org/docs/app). + +## Step-by-Step Cookbook + +### Step 1: Set Up Environment Variables + +1. In our Next.js project root, create a `.env.local` file. +2. Add our API key: `API_KEY=`. + +### Step 2: Create a Server Component + +1. In our `components` directory, create a new file, `ServerComponent.js`. +2. Use the provided example code to set up the server component. + +### Step 3: Implement Server-Side API Request + +In `ServerComponent.js`, add the following code: + +```javascript +const API_KEY = process.env.API_KEY + +export default async function ServerComponent() { + const response = await fetch( + `https://gateway-arbitrum.network.thegraph.com/api/${API_KEY}/subgraphs/id/HUZDsRpEVP2AvzDCyzDHtdc64dyDxx8FQjzsmqSg4H3B`, + { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + query: /* GraphQL */ ` + { + factories(first: 5) { + id + poolCount + txCount + totalVolumeUSD + } + } + `, + }), + }, + ) + + const responseData = await response.json() + const data = responseData.data + + return ( +
+    <div>
+      <h1>Server Component</h1>
+      {data ? (
+        <ul>
+          {data.factories.map((factory) => (
+            <li key={factory.id}>
+              <p>ID: {factory.id}</p>
+              <p>Pool Count: {factory.poolCount}</p>
+              <p>Transaction Count: {factory.txCount}</p>
+              <p>Total Volume USD: {factory.totalVolumeUSD}</p>
+            </li>
+          ))}
+        </ul>
+      ) : (
+        <p>Loading data...</p>
+      )}
+    </div>
+  )
+}
+```
+
+### Step 4: Use the Server Component
+
+1. In our page file (e.g., `pages/index.js`), import `ServerComponent`.
+2. Render the component:
+
+```javascript
+import ServerComponent from './components/ServerComponent'
+
+export default function Home() {
+  return (
+    <main>
+      <ServerComponent />
+    </main>
+ ) +} +``` + +### Step 5: Run and Test Our Dapp + +Start our Next.js application using `npm run dev`. Verify that the server component is fetching data without exposing the API key. + +![Server-side rendering](/img/api-key-server-side-rendering.png) + +### Conclusion + +By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further. From ec471f185db76d3f7794b23899acd780c10ad313 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:11 -0500 Subject: [PATCH 0670/1789] New translations subgraph-debug-forking.mdx (Romanian) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/ro/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/ro/subgraphs/cookbook/subgraph-debug-forking.mdx index 6610f19da66d..91aa7484d2ec 100644 --- a/website/src/pages/ro/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/ro/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: Quick and Easy Subgraph Debugging Using Forks --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## Ok, what is it? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## What?! How? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. 
+When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## Please, show me some code! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. The usual way to attempt a fix is: 1. Make a change in the mappings source, which you believe will solve the issue (while I know it won't). -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. Wait for it to sync-up. 4. If it breaks again go back to 1, otherwise: Hooray! It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. Make a change in the mappings source, which you believe will solve the issue. -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. If it breaks again, go back to 1, otherwise: Hooray! Now, you may have 2 questions: @@ -69,18 +69,18 @@ Now, you may have 2 questions: And I answer: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. 
Forking is easy, no need to sweat: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! So, here is what I do: -1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) From 087f60d4ae6712e5edd8ac945824aa7c5c89c114 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:12 -0500 Subject: [PATCH 0671/1789] New translations subgraph-debug-forking.mdx (French) --- .../cookbook/subgraph-debug-forking.mdx | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/fr/subgraphs/cookbook/subgraph-debug-forking.mdx index cedcf3ece5c4..75a0c1543f83 100644 --- a/website/src/pages/fr/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,25 +2,25 @@ title: Débogage rapide et facile des subgraph à l'aide de Forks --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. 
The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## D'accord, qu'est-ce que c'est ? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## Quoi ? Comment ? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## S'il vous plaît, montrez-moi du code ! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. 
-Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: +Voici les gestionnaires définis pour indexer `Gravatar`s, sans aucun bug : ```tsx export function handleNewGravatar(event: NewGravatar): void { @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. La méthode habituelle pour tenter de résoudre le problème est la suivante : 1. Apportez une modification à la source des mappages, ce qui, selon vous, résoudra le problème (même si je sais que ce ne sera pas le cas). -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. Attendez qu’il soit synchronisé. 4. S'il se casse à nouveau, revenez au point 1, sinon : Hourra ! -It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ +Il s'agit en fait d'un processus assez familier à un processus de débogage ordinaire, mais il y a une étape qui ralentit terriblement le processus : _3. Attendez qu'il se synchronise._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: -0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. +0. Créer un Graph Node local avec l'ensemble de **_base de fork approprié_**. 1. Apportez une modification à la source des mappings qui, selon vous, résoudra le problème. -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. S'il casse à nouveau, revenez à 1, sinon : Hourra ! Maintenant, vous pouvez avoir 2 questions : @@ -69,18 +69,18 @@ Maintenant, vous pouvez avoir 2 questions : Je réponds : -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. Fourcher est facile, pas besoin de transpirer : ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! Voici donc ce que je fais : -1. 
I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -90,12 +90,12 @@ $ cargo run -p graph-node --release -- \ --fork-base https://api.thegraph.com/subgraphs/id/ ``` -2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +2. Après une inspection minutieuse, j'ai remarqué qu'il y avait un décalage dans les représentations `id` utilisées lors de l'indexation des `Gravatar`s dans mes deux handlers. Alors que `handleNewGravatar` le convertit en hexadécimal (`event.params.id.toHex()`), `handleUpdatedGravatar` utilise un int32 (`event.params.id.toI32()`) ce qui fait paniquer `handleUpdatedGravatar` avec "Gravatar not found!". Je fais en sorte qu'ils convertissent tous les deux l'`id` en hexadécimal. +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. J'inspecte les logs générés par le Graph Node local et, Hourra!, tout semble fonctionner. -5. Je déploie mon subgraph, désormais débarrassé de tout bug, sur un Graph Node distant et vis heureux pour toujours ! (Malheureusement pas de patates, mais c’est la vie…) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) From 06bb1350e3d0a8b7246a49e8e7885559d8e17077 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:13 -0500 Subject: [PATCH 0672/1789] New translations subgraph-debug-forking.mdx (Spanish) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/es/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/es/subgraphs/cookbook/subgraph-debug-forking.mdx index 163a16d59e00..145fe815ede1 100644 --- a/website/src/pages/es/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/es/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: Debugging rápido y sencillo de subgrafos mediante Forks --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. 
The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## ¿Bien, qué es? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## ¡¿Qué?! ¿Cómo? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## ¡Por favor, muéstrame algo de código! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. 
Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. La forma habitual de intentar una solución es: 1. Realiza un cambio en la fuente de mapeos, que crees que resolverá el problema (aunque sé que no lo hará). -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. Espera a que se sincronice. 4. Si se vuelve a romper vuelve a 1, de lo contrario: ¡Hurra! It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. Realiza un cambio en la fuente de mapeos, que crees que resolverá el problema. -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. Si se vuelve a romper, vuelve a 1, de lo contrario: ¡Hurra! Ahora, puedes tener 2 preguntas: @@ -69,18 +69,18 @@ Ahora, puedes tener 2 preguntas: Y yo respondo: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. Bifurcar es fácil, no hay necesidad de sudar: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! Entonces, esto es lo que hago: -1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. 
After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) From bcc7fd77dc4ffc9f7bc2102c20f1d92375823d12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:14 -0500 Subject: [PATCH 0673/1789] New translations subgraph-debug-forking.mdx (Arabic) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/ar/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/ar/subgraphs/cookbook/subgraph-debug-forking.mdx index 3bacc1f60003..364fb8ce4d9c 100644 --- a/website/src/pages/ar/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: Quick and Easy Subgraph Debugging Using Forks --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## حسنا، ما هو؟ -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. 
+In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## ماذا؟! كيف؟ -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## من فضلك ، أرني بعض الأكواد! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. الطريقة المعتادة لمحاولة الإصلاح هي: 1. إجراء تغيير في مصدر الـ mappings ، والذي تعتقد أنه سيحل المشكلة (وأنا أعلم أنه لن يحلها). -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. الانتظار حتى تتم المزامنة. 4. إذا حدثت المشكلة مرة أخرى ، فارجع إلى 1! It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. قم بإجراء تغيير في مصدر الـ mappings ، والذي تعتقد أنه سيحل المشكلة. -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. 
إذا حدثت المشكلة مرة أخرى ، فارجع إلى 1! الآن ، قد يكون لديك سؤالان: @@ -69,18 +69,18 @@ Using **subgraph forking** we can essentially eliminate this step. Here is how i وأنا أجيب: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. الـتفريع سهل ، فلا داعي للقلق: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! لذلك ، هذا ما أفعله: -1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! 
(no potatoes tho) From 3c32ff27e8d6e5ddf6dc9ee2c34002702c03d493 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:15 -0500 Subject: [PATCH 0674/1789] New translations subgraph-debug-forking.mdx (Czech) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/cs/subgraphs/cookbook/subgraph-debug-forking.mdx index 4673b362c360..60ad21d2fe95 100644 --- a/website/src/pages/cs/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: Rychlé a snadné ladění podgrafů pomocí vidliček --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## Ok, co to je? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## Co?! Jak? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. 
+In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## Ukažte mi prosím nějaký kód! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. Obvyklý způsob, jak se pokusit o opravu, je: 1. Proveďte změnu ve zdroji mapování, která podle vás problém vyřeší (zatímco já vím, že ne). -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. Počkejte na synchronizaci. 4. Pokud se opět rozbije, vraťte se na 1, jinak: Hurá! It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. Proveďte změnu ve zdroji mapování, která podle vás problém vyřeší. -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. Pokud se opět rozbije, vraťte se na 1, jinak: Hurá! Nyní můžete mít 2 otázky: @@ -69,18 +69,18 @@ Nyní můžete mít 2 otázky: A já odpovídám: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. Vidličkování je snadné, není třeba se potit: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! Takže to dělám takhle: -1. 
I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. Zkontroluji protokoly vytvořené místním graf uzlem a hurá, zdá se, že vše funguje. -5. Nasadím svůj nyní již bezchybný podgraf do vzdáleného uzlu Graf a žiji šťastně až do smrti! (bez brambor) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) From 6ca53248da88033468603a42343bb962d3e3c66c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:16 -0500 Subject: [PATCH 0675/1789] New translations subgraph-debug-forking.mdx (German) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/de/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/de/subgraphs/cookbook/subgraph-debug-forking.mdx index 6610f19da66d..91aa7484d2ec 100644 --- a/website/src/pages/de/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/de/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: Quick and Easy Subgraph Debugging Using Forks --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. 
The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## Ok, what is it? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## What?! How? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## Please, show me some code! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. The usual way to attempt a fix is: 1. Make a change in the mappings source, which you believe will solve the issue (while I know it won't). -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. 
Wait for it to sync-up. 4. If it breaks again go back to 1, otherwise: Hooray! It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. Make a change in the mappings source, which you believe will solve the issue. -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. If it breaks again, go back to 1, otherwise: Hooray! Now, you may have 2 questions: @@ -69,18 +69,18 @@ Now, you may have 2 questions: And I answer: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. Forking is easy, no need to sweat: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! So, here is what I do: -1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. 
I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) From 174574d84d52b37398f174b68ff77d140a5dfe57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:17 -0500 Subject: [PATCH 0676/1789] New translations subgraph-debug-forking.mdx (Italian) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/it/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/it/subgraphs/cookbook/subgraph-debug-forking.mdx index 6610f19da66d..91aa7484d2ec 100644 --- a/website/src/pages/it/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/it/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: Quick and Easy Subgraph Debugging Using Forks --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## Ok, what is it? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## What?! How? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. 
-In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## Please, show me some code! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. The usual way to attempt a fix is: 1. Make a change in the mappings source, which you believe will solve the issue (while I know it won't). -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. Wait for it to sync-up. 4. If it breaks again go back to 1, otherwise: Hooray! It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. Make a change in the mappings source, which you believe will solve the issue. -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. If it breaks again, go back to 1, otherwise: Hooray! Now, you may have 2 questions: @@ -69,18 +69,18 @@ Now, you may have 2 questions: And I answer: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. Forking is easy, no need to sweat: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! 
+Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! So, here is what I do: -1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) From 04b671c611dc104cb9b86845899962cf5e8ebb14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:18 -0500 Subject: [PATCH 0677/1789] New translations subgraph-debug-forking.mdx (Japanese) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/ja/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/ja/subgraphs/cookbook/subgraph-debug-forking.mdx index 7d4e4d6a6e6f..cba9bbca2ff7 100644 --- a/website/src/pages/ja/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: フォークを用いた迅速かつ容易なサブグラフのデバッグ --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! 
+As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## さて、それは何でしょうか? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## その方法は? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## コードを見てみましょう -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. 通常の試すであろう修正方法: 1. マッピングソースを変更して問題の解決を試す(解決されないことは分かっていても) -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. 
Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. 同期を待つ 4. 再び問題が発生した場合は、1に戻る It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. マッピングのソースを変更し、問題を解決する -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. もし再度、壊れる場合1に戻る さて、ここで2つの疑問が生じます: @@ -69,18 +69,18 @@ Using **subgraph forking** we can essentially eliminate this step. Here is how i 回答: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. フォーキングは簡単であり煩雑な手間はありません ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! そこで、以下の通りです: -1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. 
I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) From c40466e54c654bd78bd6c0775b4f16d65771e62f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:19 -0500 Subject: [PATCH 0678/1789] New translations subgraph-debug-forking.mdx (Korean) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/ko/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/ko/subgraphs/cookbook/subgraph-debug-forking.mdx index 6610f19da66d..91aa7484d2ec 100644 --- a/website/src/pages/ko/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/ko/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: Quick and Easy Subgraph Debugging Using Forks --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## Ok, what is it? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## What?! How? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. 
-In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## Please, show me some code! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. The usual way to attempt a fix is: 1. Make a change in the mappings source, which you believe will solve the issue (while I know it won't). -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. Wait for it to sync-up. 4. If it breaks again go back to 1, otherwise: Hooray! It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. Make a change in the mappings source, which you believe will solve the issue. -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. If it breaks again, go back to 1, otherwise: Hooray! Now, you may have 2 questions: @@ -69,18 +69,18 @@ Now, you may have 2 questions: And I answer: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. Forking is easy, no need to sweat: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! 
+Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! So, here is what I do: -1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) From 9d608a30f90d3219911da6d1fdc259b7007f2ad2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:20 -0500 Subject: [PATCH 0679/1789] New translations subgraph-debug-forking.mdx (Dutch) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/nl/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/nl/subgraphs/cookbook/subgraph-debug-forking.mdx index 6610f19da66d..91aa7484d2ec 100644 --- a/website/src/pages/nl/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/nl/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: Quick and Easy Subgraph Debugging Using Forks --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! 
+As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## Ok, what is it? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## What?! How? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## Please, show me some code! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. The usual way to attempt a fix is: 1. Make a change in the mappings source, which you believe will solve the issue (while I know it won't). -2. 
Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. Wait for it to sync-up. 4. If it breaks again go back to 1, otherwise: Hooray! It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. Make a change in the mappings source, which you believe will solve the issue. -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. If it breaks again, go back to 1, otherwise: Hooray! Now, you may have 2 questions: @@ -69,18 +69,18 @@ Now, you may have 2 questions: And I answer: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. Forking is easy, no need to sweat: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! So, here is what I do: -1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +3. 
After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) From 846466578a6662f5481ad26eae09bc97ff81dd4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:21 -0500 Subject: [PATCH 0680/1789] New translations subgraph-debug-forking.mdx (Polish) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/pl/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/pl/subgraphs/cookbook/subgraph-debug-forking.mdx index 6610f19da66d..91aa7484d2ec 100644 --- a/website/src/pages/pl/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/pl/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: Quick and Easy Subgraph Debugging Using Forks --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## Ok, what is it? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## What?! How? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. 
+When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## Please, show me some code! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. The usual way to attempt a fix is: 1. Make a change in the mappings source, which you believe will solve the issue (while I know it won't). -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. Wait for it to sync-up. 4. If it breaks again go back to 1, otherwise: Hooray! It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. Make a change in the mappings source, which you believe will solve the issue. -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. If it breaks again, go back to 1, otherwise: Hooray! Now, you may have 2 questions: @@ -69,18 +69,18 @@ Now, you may have 2 questions: And I answer: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. 
Forking is easy, no need to sweat: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! So, here is what I do: -1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) From 99484bf63b0e1c42193241db1e1c44004e505ae3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:22 -0500 Subject: [PATCH 0681/1789] New translations subgraph-debug-forking.mdx (Portuguese) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/pt/subgraphs/cookbook/subgraph-debug-forking.mdx index 8d1a1bc6444a..a64909d2ae27 100644 --- a/website/src/pages/pt/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: Debugging de Subgraphs Rápido e Fácil Com Forks --- -Assim como vários sistemas que processam uma abundância de dados, os Indexadores do Graph (Graph Nodes) podem demorar um pouco para sincronizar o seu subgraph com a blockchain de destino. 
A discrepância entre mudanças rápidas para fins de debugging e os longos tempos de espera necessários para o indexing é extremamente contraprodutiva, e nós sabemos muito bem disso. É por isso que introduzimos o **forking de subgraphs**, programado pela [LimeChain](https://limechain.tech/); neste artigo. Veja como dá para acelerar bastante o debugging de subgraphs! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## Ok, o que é isso? -**Forking de subgraphs** é o processo de retirar entidades tranquilamente do armazenamento de _outro_ subgraph (normalmente, remoto). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -No contexto do debugging, o **forking de subgraphs** permite debugar o seu subgraph falho no bloco _X_ sem precisar esperar que ele sincronize até o bloco _X_. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## O quê?! Como?! -Quando um subgraph é implementado a um Graph Node remoto para indexação, e ele falha no bloco _X_, a boa notícia é que o Graph Node ainda servirá queries GraphQL com seu armazenamento, que é sincronizado até o bloco _X_. Ótimo! Podemos aproveitar este armazenamento "atualizado" para consertar os bugs que surgem ao indexar o bloco _X_. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -Resumindo, faremos um fork do subgraph falho de um Graph Node remoto para garantir que o subgraph seja indexado até o bloco _X_, para fornecer ao subgraph implantado localmente uma visão atualizada do estado da indexação; sendo este debugado no bloco _X_. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## Por favor, quero ver códigos! -Para manter a concentração no debugging de subgraphs, vamos começar com coisas simples: siga o [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) a indexar o contrato inteligente Ethereum Gravity. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. Aqui estão os handlers definidos para a indexação dos `Gravatars`, sem qualquer bug: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Que pena! 
Quando eu implanto o meu lindo subgraph no Subgraph Studio, ele falha com o erro "Gravatar not found" (Gravatar não encontrado). +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. A maneira mais comum de tentar consertar este erro é: 1. Fazer uma mudança na fonte dos mapeamentos, que talvez possa resolver o problema (mas é claro que não vai). -2. Implante o subgraph novamente no [Subgraph Studio](https://thegraph.com/studio/) (ou outro Graph Node remoto). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. Esperar que ele se sincronize. 4. Se quebrar novamente, volte ao passo 1. Se não: Eba! É um típico processo ordinário de debug, mas há um passo que atrasa muito o processo: _3. Esperar que ele se sincronize._ -Com o **forking de subgraphs**, essencialmente, podemos eliminar este passo. É mais ou menos assim: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Crie um Graph Node local com o conjunto de **_fork-base apropriado_**. 1. Faça uma mudança na fonte dos mapeamentos, que talvez possa resolver o problema. -2. Implante-o no Graph Node local, **faça um fork do subgraph falho**, e **comece do bloco problemático\_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. Se quebrar novamente, volte ao passo 1. Se não: Eba! Agora, você deve ter duas perguntas: @@ -69,18 +69,18 @@ Agora, você deve ter duas perguntas: E eu respondo: -1. `fork-base` é o URL "base", tal que quando a _id de subgraph_ é atrelada, o URL resultante (`/`) se torna um ponto final GraphQL válido para o armazenamento do subgraph. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. Forking é fácil, não precisa se preocupar: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Aliás, não esqueça de preencher o campo `dataSources.source.startBlock` no manifest do subgraph com o número do bloco problemático, para pular a indexação de blocos desnecessários e tomar vantagem do fork! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! Aqui está o que eu faço: -1. Eu crio um Graph Node local ([veja como](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) com a opção `fork-base` de `https://api.thegraph.com/subgraphs/id/`, já que eu vou forkar um subgraph, o bugado que eu lancei anteriormente, do [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. Após vistoriar com cuidado, percebo uma discrepância nas representações de `id` usadas ao indexar `Gravatar`s nos meus dois handlers. 
Enquanto `handleNewGravatar` o converte a um hex (`event.params.id.toHex()`), o `handleUpdatedGravatar` usa um int32 (`event.params.id.toI32()`). Assim, o `handleUpdatedGravatar` entra em pânico com o "Gravatar não encontrado!". Eu faço os dois converterem o `id` em um hex. -3. Após ter feito as mudanças, implanto o meu subgraph no Graph Node local, **_fazendo um fork do subgraph falho_** e programando o `dataSources.source.startBlock` em `6190343` no `subgraph.yaml`: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. Eu verifico os logs produzidos pelo Graph Node local e... eba! Parece que deu tudo certo. -5. Lanço o meu subgraph, agora livre de bugs, a um Graph Node remoto e vivo feliz para sempre! (mas sem batatas) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) From 0cb495aa36d68d9642c5c227954f94ed0c34052f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:23 -0500 Subject: [PATCH 0682/1789] New translations subgraph-debug-forking.mdx (Russian) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/ru/subgraphs/cookbook/subgraph-debug-forking.mdx index 8f2e67289d77..14fd0d145d5c 100644 --- a/website/src/pages/ru/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: Быстрая и простая отладка субграфа с использованием форков --- -Как и многие системы, обрабатывающие большие объемы данных, Индексаторы The Graph (Graph Nodes) могут занять достаточно много времени для синхронизации Вашего субграфа с целевым блокчейном. Несоответствие между быстрыми изменениями, направленными на отладку, и долгим временем ожидания, необходимым для индексирования, является крайне непродуктивным, и мы прекрасно осознаем эту проблему. Поэтому мы представляем **форкинг субграфа**, разработанный [LimeChain](https://limechain.tech/), и в этой статье я покажу, как эту функцию можно использовать для значительного ускорения отладки субграфов! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## И так, что это? -**Форкинг субграфа** — это процесс ленивой загрузки объектов из _другого_ хранилища субграфа (обычно удалённого). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -В контексте отладки **форкинг субграфа** позволяет Вам отлаживать Ваш неудавшийся субграф на блоке _X_, не дожидаясь синхронизации с блоком _X_. 
+In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## Что? Как? -Когда Вы развертываете субграф на удалённой Graph Node для индексирования, и он терпит неудачу на блоке _X_, хорошая новость заключается в том, что Graph Node всё равно будет обслуживать запросы GraphQL, используя своё хранилище, которое синхронизировано с блоком _X_. Это здорово! Таким образом, мы можем воспользоваться этим "актуальным" хранилищем, чтобы исправить ошибки, возникающие при индексировании блока _X_. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -Короче говоря, мы собираемся _форкать неработающий субграф_ с удалённой Graph Node, которая гарантированно имеет индексированный субграф до блока _X_, чтобы предоставить локально развернутому субграфу, который отлаживается на блоке _X_, актуальное состояние индексирования. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## Пожалуйста, покажите мне какой-нибудь код! -Чтобы сосредоточиться на отладке субграфа, давайте упростим задачу и продолжим с [примером субграфа](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar), который индексирует смарт-контракт Ethereum Gravity. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. Вот обработчики, определённые для индексирования `Gravatar`, без каких-либо ошибок: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Ой, как неприятно! Когда я развертываю свой идеально выглядящий субграф в [Subgraph Studio](https://thegraph.com/studio/), он выдаёт ошибку _"Gravatar not found!"_. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. Обычный способ попытаться исправить это: 1. Внести изменения в источник мэппингов, которые, по Вашему мнению, решат проблему (в то время как я знаю, что это не так). -2. Перезапустить развертывание своего субграфа в [Subgraph Studio](https://thegraph.com/studio/) (или на другую удалённую Graph Node). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. Подождать, пока он синхронизируется. 4. Если он снова сломается, вернуться к пункту 1, в противном случае: Ура! Действительно, это похоже на обычный процесс отладки, но есть один шаг, который ужасно замедляет процесс: _3. Ждите, пока завершится синхронизация._ -Используя **форк субграфа**, мы можем фактически устранить этот шаг. Вот как это выглядит: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Запустите локальную Graph Node с помощью **_соответстсвующего набора fork-base_**. 1. Внесите изменения в источник мэппингов, которые, по Вашему мнению, решат проблему. -2. 
Произведите развертывание на локальной Graph Node, **_форкнув неудачно развернутый субграф_** и **_начав с проблемного блока_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. Если он снова сломается, вернитесь к пункту 1, в противном случае: Ура! Сейчас у Вас может появиться 2 вопроса: @@ -69,18 +69,18 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { И я вам отвечаю: -1. `fork-base` - это «базовый» URL, при добавлении которого к _subgraph id_ результирующий URL (`/`) является действительной конечной точкой GraphQL для хранилища субграфа. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. Форкнуть легко, не нужно напрягаться: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Также не забудьте установить поле `dataSources.source.startBlock` в манифесте субграфа на номер проблемного блока, чтобы пропустить индексирование ненужных блоков и воспользоваться преимуществами форка! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! Итак, вот что я делаю: -1. Я запускаю локальную Graph Node ([вот как это сделать](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) с опцией `fork-base`, установленной в: `https://api.thegraph.com/subgraphs/id/`, поскольку я буду форкать субграф, тот самый, который я ранее развертывал, с [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. После тщательной проверки я замечаю, что существует несоответствие в представлениях `id`, используемых при индексировании `Gravatar` в двух моих обработчиках. В то время как `handleNewGravatar` конвертирует его в hex (`event.params.id.toHex()`), `handleUpdatedGravatar` использует int32 (`event.params.id.toI32()`), что приводит к тому, что `handleUpdatedGravatar` завершается ошибкой и появляется сообщение "Gravatar not found!". Я заставляю оба обработчика конвертировать `id` в hex. -3. После внесения изменений я развертываю свой субграф на локальной Graph Node, **выполняя форк неудавшегося субграфа** и устанавливаю значение `dataSources.source.startBlock` равным `6190343` в файле `subgraph.yaml`: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. Я проверяю логи, созданные локальной Graph Node, и, ура!, кажется, все работает. -5. Я развертываю свой теперь свободный от ошибок субграф на удаленной Graph Node и живу долго и счастливо! (но без картошки) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! 
(no potatoes tho) From b9e52fb4ac8f190e7a337e23bf46d5d2cacdff93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:24 -0500 Subject: [PATCH 0683/1789] New translations subgraph-debug-forking.mdx (Swedish) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/sv/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/sv/subgraphs/cookbook/subgraph-debug-forking.mdx index aee8ecf8791f..75bff8ee89a8 100644 --- a/website/src/pages/sv/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: Snabb och enkel subgraf felsökning med gafflar --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## Ok, vad är det? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## Vad?! Hur? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. 
+In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## Snälla, visa mig lite kod! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. Det vanliga sättet att försöka fixa är: 1. Gör en förändring i mappningskällan, som du tror kommer att lösa problemet (även om jag vet att det inte kommer att göra det). -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. Vänta tills det synkroniseras. 4. Om den går sönder igen gå tillbaka till 1, annars: Hurra! It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. Gör en ändring i mappningskällan som du tror kommer att lösa problemet. -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. Om den går sönder igen, gå tillbaka till 1, annars: Hurra! Nu kanske du har 2 frågor: @@ -69,18 +69,18 @@ Nu kanske du har 2 frågor: Och jag svarar: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. Gaffling är lätt, du behöver inte svettas: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! Så här är vad jag gör: -1. 
I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) From 73ba543a9fce9f82d387b77788dcc1d78b3a4c3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:25 -0500 Subject: [PATCH 0684/1789] New translations subgraph-debug-forking.mdx (Turkish) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/tr/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/tr/subgraphs/cookbook/subgraph-debug-forking.mdx index d739d1aee6d6..fd5c2222db9a 100644 --- a/website/src/pages/tr/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/tr/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: Fork Kullanarak Hızlı ve Kolay Subgraph Hata Ayıklama --- -Büyük miktarda veri işleyen birçok sistemde olduğu gibi, The Graph'ın Endeksleyicilerinin (Graph Düğümlerinin), subgraph'inizi hedef blokzinciri ile senkronize etmesi ciddi ölçüde uzun sürebilir. Hata ayıklama amacıyla yapılan hızlı değişiklikler ile endeksleme için gereken uzun bekleme süreleri arasındaki uyumsuzluk son derece verimsiz olmaktadır. Bunun kesinlikle farkındayız. Bu yüzden, [LimeChain](https://limechain.tech/) tarafından geliştirilen **subgraph çatallama**yı sunuyoruz. Bu makalede size bu özelliğin subgraph hata ayıklamayı önemli ölçüde hızlandırmak için nasıl kullanılabileceğini göstereceğim! 
+As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## Peki, nedir bu Subgraph Forklama? -**Subgraph çatallama**, _başka bir_ subgraph'in mağazasından (genellikle uzak bir mağaza) tembel bir şekilde öge çekme işlemidir. +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -Hata ayıklama bağlamında, **subgraph çatallama** başarısız olmuş subgraph'i, _X_ blokuna kadar senkronize olmasını beklemeden hata ayıklamanıza olanak tanır. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## Ne?! Nasıl? -Bir subgraph'i uzaktaki bir Graph Düğümüne endekslemek amacıyla dağıttığınızda subgraph _X_ blokunda çalışmayı durdurabilir. İyi haber ise Graph Düğümü, _X_ blokuna kadar senkronize olmuş deposunu kullanarak GraphQL sorgularını yerine getiriyor olacaktır. Bu harika bir haber! Bu durum, endeksleme sırasında _X_ blokunda ortaya çıkan hataları düzeltmek için bu "güncel" depodan faydalanabileceğimiz anlamına gelir. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -Özetle, _çalışmayı durdurmuş bir subgraph'i_, _X_ blokuna kadar endekslenmesi garanti edilen uzak bir Graph Düğümünden _çatallayacağız_. Böylece _X_ blokunda hatası ayıklanan yerel olarak dağıtılmış subgraph'in endeksleme durumunu gösteren güncel bir görünüm sağlamış olacağız. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## Lütfen bana biraz kod göster! -Subgraph hata ayıklamalarına odaklanmak için işleri basitleştirelim ve Ethereum Gravatar akıllı sözleşmesini endeksleyen [örnek-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) üzerinde çalışalım. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. `Gravatar`'ları endekslemek için tanımlanan, hiçbir hata içermeyen işleyiciler şunlardır: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Eyvah, ne talihsiz bir durum, mükemmel görünen subgraph'imi [Subgraph Studio](https://thegraph.com/studio/) üzerinde dağıttığımda _"Gravatar bulunamadı!"_ hatası ile çalışmayı durduruyor. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. 
Genellikle düzeltmeyi denemek için yol şudur: 1. Eşleştirme kaynağında, sorunu çözeceğine inandığınız bir değişiklik yapın (ama ben çözmeyeceğini biliyorum). -2. Subgraph'i [Subgraph Studio](https://thegraph.com/studio/) (veya başka bir uzak Graph Düğümü) üzerinde yeniden dağıtın. +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. Senkronize olması için bekleyin. 4. Tekrar sorunla karşılaşırsanız 1. aşamaya geri dönün, aksi takdirde: Yaşasın! Bu gerçekten sıradan bir hata ayıklama sürecine oldukça benzemektedir, ancak süreci korkunç derecede yavaşlatan bir adım vardır: _3. Senkronize olmasını bekleyin._ -**Subgraph çatallama** kullanarak bu adımı ortadan kaldırabiliriz. Aşağıda bu işlemi görebilirsiniz: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. **_Uygun çatal-temeli (fork-base)_** ayarlanmış yerel bir Graph Düğümü başlatın. 1. Eşleştirme kaynağında, sorunu çözeceğine inandığınız bir değişiklik yapın. -2. Çalışmayı durduran subgraph'i **_çatallayarak_** ve **_sorunlu bloktan başlayarak_** yerel Graph Düğümüne dağıtın. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. Tekrar sorunla karşılaşırsanız 1. aşamaya geri dönün, aksi takdirde: Yaşasın! Şimdi, 2 sorunuz olabilir: @@ -69,18 +69,18 @@ Bu gerçekten sıradan bir hata ayıklama sürecine oldukça benzemektedir, anca Ve ben cevap veriyorum: -1. `fork-base`, "temel" URL'dir, böylece devamına _subgraph id_ eklendiğinde oluşan URL (`/`) subgraph'in depolaması için geçerli bir GraphQL uç noktası olur. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. Forklama kolay, ter dökmeye gerek yok: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Ayrıca, ihtiyaç olmayan blokları endekslemeyi atlamak ve çatallamanın avantajlarından yararlanmak için `subgraph` manifesto dosyasındaki `dataSources.source.startBlock` alanını sorunlu blokun numarası olarak ayarlamayı unutmayın! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! İşte benim ne yaptığım: -1. Lokal bir Graph Düğümü başlatıyorum ([nasıl yapılacağını buradan öğrenebilirsiniz](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) ve `fork-base` seçeneğini şu şekilde ayarlıyorum: `https://api.thegraph.com/subgraphs/id/`, çünkü daha önce [Subgraph Studio](https://thegraph.com/studio/)dan dağıttığım hatalı `subgraph`i çatallayacağım. +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. Dikkatli bir incelemeden sonra, iki işleyicimde `Gravatar`'ları endekslerken kullanılan `id` temsillerinde bir uyumsuzluk olduğunu fark ettim. 
`handleNewGravatar` onu bir hex'e dönüştürürken (`event.params.id.toHex()`), `handleUpdatedGravatar` bir int32 (`event.params.id.toI32()`) kullanıyor, bu da `handleUpdatedGravatar`'ın "Gravatar not found!" hatasını vermesine neden oluyor. İkisini de `id`'yi hex'e dönüştürecek şekilde düzenledim. -3. Değişiklikleri yaptıktan sonra, **_hatalı subgraph'i çatallayarak_** ve `subgraph.yaml` dosyasında `dataSources.source.startBlock` değerini `6190343` olarak ayarlayarak subgraph'imi yerel Graph Düğümü'ne dağıttım: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. Yerel Graph Düğümü tarafından üretilen günlükleri inceliyorum ve yaşasın! Her şey yolunda görünüyor. -5. Artık hatasız olan `subgraph`imi uzak bir Graph Düğümü'nde dağıtıyorum ve sonsuza kadar mutlu yaşıyorum! +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) From 383c63480644c4fedfd293957aade4e3908e9652 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:26 -0500 Subject: [PATCH 0685/1789] New translations subgraph-debug-forking.mdx (Ukrainian) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/uk/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/uk/subgraphs/cookbook/subgraph-debug-forking.mdx index 6610f19da66d..91aa7484d2ec 100644 --- a/website/src/pages/uk/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: Quick and Easy Subgraph Debugging Using Forks --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## Ok, what is it? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. 
+In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## What?! How? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## Please, show me some code! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. The usual way to attempt a fix is: 1. Make a change in the mappings source, which you believe will solve the issue (while I know it won't). -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. Wait for it to sync-up. 4. If it breaks again go back to 1, otherwise: Hooray! It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. Make a change in the mappings source, which you believe will solve the issue. -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. 
If it breaks again, go back to 1, otherwise: Hooray! Now, you may have 2 questions: @@ -69,18 +69,18 @@ Now, you may have 2 questions: And I answer: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. Forking is easy, no need to sweat: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! So, here is what I do: -1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! 
(no potatoes tho) From 1b571bb9b6465f6419597b46a7c5615fb2627050 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:27 -0500 Subject: [PATCH 0686/1789] New translations subgraph-debug-forking.mdx (Chinese Simplified) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/zh/subgraphs/cookbook/subgraph-debug-forking.mdx index 7eef54e247ea..8b622ca66134 100644 --- a/website/src/pages/zh/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: 使用分叉快速轻松地调试子图 --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## 好的,那是什么? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## 什么?! 如何处理? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. 
+In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## 请给我看一些代码! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. The usual way to attempt a fix is: 1. Make a change in the mappings source, which you believe will solve the issue (while I know it won't). -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. Wait for it to sync-up. 4. If it breaks again go back to 1, otherwise: Hooray! It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. 按照你认为可以解决问题的方法,在映射源中进行更改。 -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. 如果它再次中断,则返回 第1步,否则:搞定! 现在,你可能有 2 个问题: @@ -69,18 +69,18 @@ Using **subgraph forking** we can essentially eliminate this step. Here is how i 回答如下: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. 分叉容易,不要紧张: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! 所以,我是这么做的: -1. 
I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) From 30d33f59f246dfbac5682502a958c7e5e1700507 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:28 -0500 Subject: [PATCH 0687/1789] New translations subgraph-debug-forking.mdx (Urdu (Pakistan)) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/ur/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/ur/subgraphs/cookbook/subgraph-debug-forking.mdx index 7a0dadaa4dfe..70889905808f 100644 --- a/website/src/pages/ur/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: فورکس کا استعمال کرتے ہوۓ تیز اور آسان ڈیبگنگ --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. 
The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## ٹھیک ہے، یہ ہے کیا؟ -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## کیا؟! کیسے؟ -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## براۓ مہربانی، مجہے کچھ کوڈ دکھائیں! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. درست کرنے کی کوشش کرنے کا معمول کا طریقہ یہ ہے: 1. میپنگ کے ماخذ میں تبدیلی کریں، جس کے بارے میں آپ کو یقین ہے کہ مسئلہ حل ہو جائے گا (جبکہ میں جانتا ہوں کہ ایسا نہیں ہوگا). -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. 
Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. اس کے مطابقت پذیر ہونے کا انتظار کریں. 4. اگر یہ دوبارہ ٹوٹ جاتا ہے تو 1 پر واپس جائیں، ورنہ: ہورے! It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. میپنگ کے ماخذ میں تبدیلی کریں، جس سے آپ کو یقین ہے کہ مسئلہ حل ہو جائے گا. -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. اگر یہ دوبارہ ٹوٹ جاتا ہے، تو 1 پر واپس جائیں، ورنہ: ہورے! اب، آپ کے پاس 2 سوالات ہوسکتے ہیں: @@ -69,18 +69,18 @@ Using **subgraph forking** we can essentially eliminate this step. Here is how i اور میرا جواب: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. فورکنگ آسان ہے، پریشان ہونے کی ضرورت نہیں ہے: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! تو، یہاں میں کیا کرتا ہوں: -1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +3. 
After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) From 56a57e9b8c4c13e516c3020a66c3c077b035e3fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:29 -0500 Subject: [PATCH 0688/1789] New translations subgraph-debug-forking.mdx (Vietnamese) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/vi/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/vi/subgraphs/cookbook/subgraph-debug-forking.mdx index 6610f19da66d..91aa7484d2ec 100644 --- a/website/src/pages/vi/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: Quick and Easy Subgraph Debugging Using Forks --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## Ok, what is it? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## What?! How? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. 
+When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## Please, show me some code! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. The usual way to attempt a fix is: 1. Make a change in the mappings source, which you believe will solve the issue (while I know it won't). -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. Wait for it to sync-up. 4. If it breaks again go back to 1, otherwise: Hooray! It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. Make a change in the mappings source, which you believe will solve the issue. -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. If it breaks again, go back to 1, otherwise: Hooray! Now, you may have 2 questions: @@ -69,18 +69,18 @@ Now, you may have 2 questions: And I answer: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. 
Forking is easy, no need to sweat: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! So, here is what I do: -1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) From c979ce72dac0bb94cc765f817b68ee000c0a9047 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:30 -0500 Subject: [PATCH 0689/1789] New translations subgraph-debug-forking.mdx (Marathi) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/mr/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/mr/subgraphs/cookbook/subgraph-debug-forking.mdx index 3c7f2ec051e3..7007c6021580 100644 --- a/website/src/pages/mr/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: फॉर्क्स वापरून जलद आणि सुलभ सबग्राफ डीबगिंग --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. 
The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## ठीक आहे, ते काय आहे? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## काय?! कसे? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## कृपया, मला काही कोड दाखवा! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. 
Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. निराकरण करण्याचा प्रयत्न करण्याचा नेहमीचा मार्ग आहे: 1. मॅपिंग स्त्रोतामध्ये बदल करा, जो तुम्हाला विश्वास आहे की समस्या सोडवेल (जेव्हा मला माहित आहे की ते होणार नाही). -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. ते समक्रमित होण्याची प्रतीक्षा करा. 4. तो पुन्हा खंडित झाल्यास 1 वर परत जा, अन्यथा: हुर्रे! It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. मॅपिंग स्त्रोतामध्ये बदल करा, जो तुम्हाला विश्वास आहे की समस्या सोडवेल. -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. तो पुन्हा खंडित झाल्यास, 1 वर परत जा, अन्यथा: हुर्रे! आता, तुमच्याकडे 2 प्रश्न असू शकतात: @@ -69,18 +69,18 @@ Using **subgraph forking** we can essentially eliminate this step. Here is how i आणि मी उत्तर देतो: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. काटा काढणे सोपे आहे, घाम गाळण्याची गरज नाही: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! तर, मी काय करतो ते येथे आहे: -1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). 
``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) From 185a1b05237cd657b05bce8403a3aa3aa9f4cd8c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:31 -0500 Subject: [PATCH 0690/1789] New translations subgraph-debug-forking.mdx (Hindi) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/hi/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/hi/subgraphs/cookbook/subgraph-debug-forking.mdx index 0dc044459311..089b4cd545c5 100644 --- a/website/src/pages/hi/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/hi/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,23 +2,23 @@ title: फोर्क्स का उपयोग करके त्वरित और आसान सबग्राफ डिबगिंग --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! ## ठीक है वो क्या है? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). 
-In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. ## क्या?! कैसे? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## कृपया मुझे कुछ कोड दिखाओ! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. फिक्स का प्रयास करने का सामान्य तरीका है: 1. मैपिंग सोर्स में बदलाव करें, जो आपको लगता है कि समस्या का समाधान करेगा (जबकि मुझे पता है कि यह नहीं होगा)। -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. इसके सिंक-अप होने की प्रतीक्षा करें। 4. यदि यह फिर से टूट जाता है तो 1 पर वापस जाएँ, अन्यथा: हुर्रे! It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. मैपिंग सोर्स में परिवर्तन करें, जिसके बारे में आपको लगता है कि इससे समस्या हल हो जाएगी. -2. 
Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. 3. यदि यह फिर से ब्रेक जाता है, तो 1 पर वापस जाएँ, अन्यथा: हुर्रे! अब, आपके 2 प्रश्न हो सकते हैं: @@ -69,18 +69,18 @@ Using **subgraph forking** we can essentially eliminate this step. Here is how i और मैं उत्तर देता हूं: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. 2. फोर्किंग आसान है, पसीना बहाने की जरूरत नहीं: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! तो, यहाँ मैं क्या करता हूँ: -1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -91,11 +91,11 @@ $ cargo run -p graph-node --release -- \ ``` 2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` 4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! 
(no potatoes tho) From de45b035bb39b157f17f5f1fd8fbd32b75b4c19e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:32 -0500 Subject: [PATCH 0691/1789] New translations subgraph-debug-forking.mdx (Swahili) --- .../cookbook/subgraph-debug-forking.mdx | 101 ++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/cookbook/subgraph-debug-forking.mdx diff --git a/website/src/pages/sw/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/sw/subgraphs/cookbook/subgraph-debug-forking.mdx new file mode 100644 index 000000000000..91aa7484d2ec --- /dev/null +++ b/website/src/pages/sw/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -0,0 +1,101 @@ +--- +title: Quick and Easy Subgraph Debugging Using Forks +--- + +As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your Subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **Subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up Subgraph debugging! + +## Ok, what is it? + +**Subgraph forking** is the process of lazily fetching entities from _another_ Subgraph's store (usually a remote one). + +In the context of debugging, **Subgraph forking** allows you to debug your failed Subgraph at block _X_ without needing to wait to sync-up to block _X_. + +## What?! How? + +When you deploy a Subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. + +In a nutshell, we are going to _fork the failing Subgraph_ from a remote Graph Node that is guaranteed to have the Subgraph indexed up to block _X_ in order to provide the locally deployed Subgraph being debugged at block _X_ an up-to-date view of the indexing state. + +## Please, show me some code! + +To stay focused on Subgraph debugging, let's keep things simple and run along with the [example-Subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. + +Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: + +```tsx +export function handleNewGravatar(event: NewGravatar): void { + let gravatar = new Gravatar(event.params.id.toHex().toString()) + gravatar.owner = event.params.owner + gravatar.displayName = event.params.displayName + gravatar.imageUrl = event.params.imageUrl + gravatar.save() +} + +export function handleUpdatedGravatar(event: UpdatedGravatar): void { + let gravatar = Gravatar.load(event.params.id.toI32().toString()) + if (gravatar == null) { + log.critical('Gravatar not found!', []) + return + } + gravatar.owner = event.params.owner + gravatar.displayName = event.params.displayName + gravatar.imageUrl = event.params.imageUrl + gravatar.save() +} +``` + +Oops, how unfortunate, when I deploy my perfect looking Subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. + +The usual way to attempt a fix is: + +1. 
Make a change in the mappings source, which you believe will solve the issue (while I know it won't). +2. Re-deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +3. Wait for it to sync-up. +4. If it breaks again go back to 1, otherwise: Hooray! + +It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ + +Using **Subgraph forking** we can essentially eliminate this step. Here is how it looks: + +0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. +1. Make a change in the mappings source, which you believe will solve the issue. +2. Deploy to the local Graph Node, **_forking the failing Subgraph_** and **_starting from the problematic block_**. +3. If it breaks again, go back to 1, otherwise: Hooray! + +Now, you may have 2 questions: + +1. fork-base what??? +2. Forking who?! + +And I answer: + +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the Subgraph's store. +2. Forking is easy, no need to sweat: + +```bash +$ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 +``` + +Also, don't forget to set the `dataSources.source.startBlock` field in the Subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! + +So, here is what I do: + +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a Subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). + +``` +$ cargo run -p graph-node --release -- \ + --postgres-url postgresql://USERNAME[:PASSWORD]@localhost:5432/graph-node \ + --ethereum-rpc NETWORK_NAME:[CAPABILITIES]:URL \ + --ipfs 127.0.0.1:5001 + --fork-base https://api.thegraph.com/subgraphs/id/ +``` + +2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. +3. After I made the changes I deploy my Subgraph to the local Graph Node, **_forking the failing Subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: + +```bash +$ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 +``` + +4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. +5. I deploy my now bug-free Subgraph to a remote Graph Node and live happily ever after! 
(no potatoes tho) From 985df9acc8532d2e80d8dbfc5c4d6ac981f75f04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:33 -0500 Subject: [PATCH 0692/1789] New translations subgraph-uncrashable.mdx (Romanian) --- .../ro/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ro/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/ro/subgraphs/cookbook/subgraph-uncrashable.mdx index 0cc91a0fa2c3..a08e2a7ad8c9 100644 --- a/website/src/pages/ro/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/ro/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: Safe Subgraph Code Generator --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## Why integrate with Subgraph Uncrashable? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Key Features** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - The framework also includes a way (via the config file) to create custom, but safe, setter functions for groups of entity variables. 
This way it is impossible for the user to load/use a stale graph entity and it is also impossible to forget to save or set a variable that is required by the function. -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen command. @@ -26,4 +26,4 @@ Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. From f3b143257e8e153c75a320fb69ccf457ea85e653 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:34 -0500 Subject: [PATCH 0693/1789] New translations subgraph-uncrashable.mdx (French) --- .../subgraphs/cookbook/subgraph-uncrashable.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/fr/subgraphs/cookbook/subgraph-uncrashable.mdx index fadcd9b98faf..bb4a3f214759 100644 --- a/website/src/pages/fr/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: Générateur de code de subgraph sécurisé --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## Pourquoi intégrer Subgraph Uncrashable ? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Completely Safe**. 
Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. -**Key Features** +**Caractéristiques principales** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - Le cadre comprend également un moyen (via le fichier de configuration) de créer des fonctions de définition personnalisées, mais sûres, pour des groupes de variables d'entité. De cette façon, il est impossible pour l'utilisateur de charger/utiliser une entité de graph obsolète et il est également impossible d'oublier de sauvegarder ou définissez une variable requise par la fonction. -- Les logs d'avertissement sont enregistrés sous forme de logs indiquant où il y a une violation de la logique du subgraph pour aider à corriger le problème afin d'assurer l'exactitude des données. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. Subgraph Uncrashable peut être exécuté en tant qu'indicateur facultatif à l'aide de la commande Graph CLI codegen. @@ -26,4 +26,4 @@ Subgraph Uncrashable peut être exécuté en tant qu'indicateur facultatif à l' graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. 
From 394ca9f08fe186f9cc22696c5d5c823425d6bca2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:35 -0500 Subject: [PATCH 0694/1789] New translations subgraph-uncrashable.mdx (Spanish) --- .../es/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/es/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/es/subgraphs/cookbook/subgraph-uncrashable.mdx index 59b33568a1f2..2794e6ab66d8 100644 --- a/website/src/pages/es/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/es/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: Generador de código de subgrafo seguro --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## ¿Por qué integrarse con Subgraph Uncrashable? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Key Features** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. 
- El marco también incluye una forma (a través del archivo de configuración) para crear funciones de establecimiento personalizadas, pero seguras, para grupos de variables de entidad. De esta forma, es imposible que el usuario cargue/utilice una entidad gráfica obsoleta y también es imposible olvidarse de guardar o configurar una variable requerida por la función. -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. Subgraph Uncrashable se puede ejecutar como un indicador opcional mediante el comando codegen Graph CLI. @@ -26,4 +26,4 @@ Subgraph Uncrashable se puede ejecutar como un indicador opcional mediante el co graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. From 79385a4af383ac04fa3dc97c7e759175db509a6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:36 -0500 Subject: [PATCH 0695/1789] New translations subgraph-uncrashable.mdx (Arabic) --- .../ar/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ar/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/ar/subgraphs/cookbook/subgraph-uncrashable.mdx index 0cc91a0fa2c3..a08e2a7ad8c9 100644 --- a/website/src/pages/ar/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: Safe Subgraph Code Generator --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## Why integrate with Subgraph Uncrashable? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. 
Ensure all interactions with entities are completely atomic. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Key Features** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - The framework also includes a way (via the config file) to create custom, but safe, setter functions for groups of entity variables. This way it is impossible for the user to load/use a stale graph entity and it is also impossible to forget to save or set a variable that is required by the function. -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen command. @@ -26,4 +26,4 @@ Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. 
From f231b622536641640021d57da0599b0aaf4942d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:37 -0500 Subject: [PATCH 0696/1789] New translations subgraph-uncrashable.mdx (Czech) --- .../cs/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/cs/subgraphs/cookbook/subgraph-uncrashable.mdx index 53750dd1cbee..bdc3671399e1 100644 --- a/website/src/pages/cs/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: Generátor kódu bezpečného podgrafu --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## Proč se integrovat s aplikací Subgraph Uncrashable? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Key Features** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. 
- Framework také obsahuje způsob (prostřednictvím konfiguračního souboru), jak vytvořit vlastní, ale bezpečné funkce setteru pro skupiny proměnných entit. Tímto způsobem není možné, aby uživatel načetl/použil zastaralou entitu grafu, a také není možné zapomenout uložit nebo nastavit proměnnou, kterou funkce vyžaduje. -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. Podgraf Uncrashable lze spustit jako volitelný příznak pomocí příkazu Graph CLI codegen. @@ -26,4 +26,4 @@ Podgraf Uncrashable lze spustit jako volitelný příznak pomocí příkazu Grap graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. From d325eab8dc66629b01ab62c93fcc6fc53f11b83f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:38 -0500 Subject: [PATCH 0697/1789] New translations subgraph-uncrashable.mdx (German) --- .../de/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/de/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/de/subgraphs/cookbook/subgraph-uncrashable.mdx index 0cc91a0fa2c3..a08e2a7ad8c9 100644 --- a/website/src/pages/de/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/de/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: Safe Subgraph Code Generator --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## Why integrate with Subgraph Uncrashable? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Completely Safe**. 
Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Key Features** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - The framework also includes a way (via the config file) to create custom, but safe, setter functions for groups of entity variables. This way it is impossible for the user to load/use a stale graph entity and it is also impossible to forget to save or set a variable that is required by the function. -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen command. @@ -26,4 +26,4 @@ Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. From eb7b6cd43f92ca57dd938e8ea62be365b447aa77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:39 -0500 Subject: [PATCH 0698/1789] New translations subgraph-uncrashable.mdx (Italian) --- .../it/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/it/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/it/subgraphs/cookbook/subgraph-uncrashable.mdx index 0cc91a0fa2c3..a08e2a7ad8c9 100644 --- a/website/src/pages/it/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/it/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: Safe Subgraph Code Generator --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. 
It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## Why integrate with Subgraph Uncrashable? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Key Features** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - The framework also includes a way (via the config file) to create custom, but safe, setter functions for groups of entity variables. This way it is impossible for the user to load/use a stale graph entity and it is also impossible to forget to save or set a variable that is required by the function. -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen command. @@ -26,4 +26,4 @@ Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. 
+Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. From 071a3823a2e56f898b37733661f2fd213074ae45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:40 -0500 Subject: [PATCH 0699/1789] New translations subgraph-uncrashable.mdx (Japanese) --- .../ja/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ja/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/ja/subgraphs/cookbook/subgraph-uncrashable.mdx index 74d66b27fcaa..5f51f521b214 100644 --- a/website/src/pages/ja/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: 安全なサブグラフのコード生成 --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## Subgraph Uncrashable と統合する理由 -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Key Features** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. 
The code generation will use this config to generate helper functions that are to the users specification. - また、このフレームワークには、エンティティ変数のグループに対して、カスタムだが安全なセッター関数を作成する方法が(設定ファイルを通じて)含まれています。この方法では、ユーザーが古いグラフ・エンティティをロード/使用することは不可能であり、また、関数が必要とする変数の保存や設定を忘れることも不可能です。 -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. Subgraph Uncrashableは、Graph CLI codegenコマンドでオプションのフラグとして実行することができます。 @@ -26,4 +26,4 @@ Subgraph Uncrashableは、Graph CLI codegenコマンドでオプションのフ graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. From 7d4e3240da204d22b750beb89f48e44f2c758007 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:41 -0500 Subject: [PATCH 0700/1789] New translations subgraph-uncrashable.mdx (Korean) --- .../ko/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ko/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/ko/subgraphs/cookbook/subgraph-uncrashable.mdx index 0cc91a0fa2c3..a08e2a7ad8c9 100644 --- a/website/src/pages/ko/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/ko/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: Safe Subgraph Code Generator --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## Why integrate with Subgraph Uncrashable? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Completely Safe**. 
Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Key Features** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - The framework also includes a way (via the config file) to create custom, but safe, setter functions for groups of entity variables. This way it is impossible for the user to load/use a stale graph entity and it is also impossible to forget to save or set a variable that is required by the function. -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen command. @@ -26,4 +26,4 @@ Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. From f00ab4ef2d04ff7e94f000d0c4b5c1ab71f14b12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:42 -0500 Subject: [PATCH 0701/1789] New translations subgraph-uncrashable.mdx (Dutch) --- .../nl/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/nl/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/nl/subgraphs/cookbook/subgraph-uncrashable.mdx index 0cc91a0fa2c3..a08e2a7ad8c9 100644 --- a/website/src/pages/nl/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/nl/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: Safe Subgraph Code Generator --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. 
It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## Why integrate with Subgraph Uncrashable? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Key Features** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - The framework also includes a way (via the config file) to create custom, but safe, setter functions for groups of entity variables. This way it is impossible for the user to load/use a stale graph entity and it is also impossible to forget to save or set a variable that is required by the function. -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen command. @@ -26,4 +26,4 @@ Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. 
+Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. From a9bdadcb01ffcfd484a5da9dcecae96b504f36e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:43 -0500 Subject: [PATCH 0702/1789] New translations subgraph-uncrashable.mdx (Polish) --- .../pl/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/pl/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/pl/subgraphs/cookbook/subgraph-uncrashable.mdx index 0cc91a0fa2c3..a08e2a7ad8c9 100644 --- a/website/src/pages/pl/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/pl/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: Safe Subgraph Code Generator --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## Why integrate with Subgraph Uncrashable? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Key Features** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. 
+- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - The framework also includes a way (via the config file) to create custom, but safe, setter functions for groups of entity variables. This way it is impossible for the user to load/use a stale graph entity and it is also impossible to forget to save or set a variable that is required by the function. -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen command. @@ -26,4 +26,4 @@ Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. From ca85c315a495534553ad81298b9cecabaa748c57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:44 -0500 Subject: [PATCH 0703/1789] New translations subgraph-uncrashable.mdx (Portuguese) --- .../pt/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/pt/subgraphs/cookbook/subgraph-uncrashable.mdx index 522740ee8246..b13496b1fba6 100644 --- a/website/src/pages/pt/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: Gerador de Código Seguro para Subgraphs --- -O [Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) é uma ferramenta geradora de código, que gera um conjunto de funções de helper a partir do schema GraphQL de um projeto. Ela garante total segurança e consistência em todas as interações com entidades no seu subgraph. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## Por que integrar com a Subgraph Uncrashable? -- **Atividade Contínua**. Entidades mal-cuidadas podem causar panes em subgraphs, o que pode ser perturbador para projetos dependentes no The Graph. Prepare funções de helper para deixar os seus subgraphs "impossíveis de travar" e garantir a continuidade dos negócios. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Totalmente Seguro**. 
Alguns dos problemas comuns vistos na programação de subgraphs são problemas de carregamento de entidades não definidas; o não-preparo ou inicialização de todos os valores de entidades; e condições de corrida sobre carregamento e salvamento de entidades. Garanta que todas as interações com entidades sejam completamente atômicas. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **Configurável pelo Utilizador**. Determine valores padrão e configure o nível necessário de verificações de segurança para o seu projeto. São gravados registros de aviso que indicam onde há uma brecha de lógica no subgraph, auxiliando o processo de solução de problemas e garantir a precisão dos dados. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Características Importantes** -- A ferramenta de geração de código acomoda **todos** os tipos de subgraph e pode ser configurada para que os utilizadores coloquem padrões razoáveis nos valores. A geração de código usará esta configuração para gerar funções de helper que combinem com a especificação do utilizador. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - A estrutura também inclui uma maneira (através do arquivo de configuração) de criar funções personalizadas, mas seguras, para configurar grupos de variáveis de entidade. Desta maneira, é impossível que o utilizador carregue/use uma entidade de graph obsoleta, e também é impossível esquecer de salvar ou determinar uma variável exigida pela função. -- Logs de aviso são registrados como logs que indicam onde há uma quebra de lógica no subgraph, para ajudar a consertar o problema e garantir a segurança dos dados. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. A Subgraph Uncrashable pode ser executada como flag opcional usando o comando codegen no Graph CLI. @@ -26,4 +26,4 @@ A Subgraph Uncrashable pode ser executada como flag opcional usando o comando co graph codegen -u [options] [] ``` -Visite a [documentação do Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/docs/) ou veja este [tutorial em vídeo](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) para aprender como programar subgraphs mais seguros. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. 
From 53cfb419f0e068f62a41e9c314c9026f56811a27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:45 -0500 Subject: [PATCH 0704/1789] New translations subgraph-uncrashable.mdx (Russian) --- .../ru/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/ru/subgraphs/cookbook/subgraph-uncrashable.mdx index f81fe52608e8..67040c394cbd 100644 --- a/website/src/pages/ru/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: Генератор кода безопасного субграфа --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) — это инструмент для генерации кода, который создает набор вспомогательных функций из схемы GraphQL проекта. Он гарантирует, что все взаимодействия с объектами в Вашем субграфе будут полностью безопасными и последовательными. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## Зачем интегрироваться с Subgraph Uncrashable? -- **Непрерывная работа**. Ошибки в обработке объектов могут привести к сбоям субграфов, что нарушит работу проектов, зависящих от The Graph. Настройте вспомогательные функции, чтобы Ваши субграфы оставались «непотопляемыми» и обеспечивали стабильную работу. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Полная безопасность**. Обычные проблемы при разработке субграфов — это ошибки загрузки неопределенных элементов, неинициализированные или неустановленные значения элементов, а также гонки при загрузке и сохранении элементов. Убедитесь, что все взаимодействия с объектами полностью атомарны. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **Настройка пользователем**. Установите значения по умолчанию и настройте уровень проверок безопасности, который соответствует потребностям Вашего индивидуального проекта. Записываются предупреждающие логи, указывающие на то, где произходит нарушение логики субграфа, что помогает исправить проблему и обеспечить точность данных. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Ключевые особенности** -- Инструмент генерации кода поддерживает **все** типы субграфов и настраивается таким образом, чтобы пользователи могли задать разумные значения по умолчанию. Генерация кода будет использовать эту настройку для создания вспомогательных функций в соответствии с требованиями пользователей. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. 
The code generation will use this config to generate helper functions that are to the users specification. - Фреймворк также включает в себя способ создания пользовательских, но безопасных функций установки для групп переменных объектов (через config-файл). Таким образом, пользователь не сможет загрузить/использовать устаревшую graph entity, и также не сможет забыть о сохранении или установке переменной, которая требуется функцией. -- Предупреждающие логи записываются как логи, указывающие на нарушение логики субграфа, чтобы помочь устранить проблему и обеспечить точность данных. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. Subgraph Uncrashable можно запустить как необязательный флаг с помощью команды Graph CLI codegen. @@ -26,4 +26,4 @@ Subgraph Uncrashable можно запустить как необязатель graph codegen -u [options] [] ``` -Ознакомьтесь с [документацией по subgraph uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/docs/) или посмотрите этот [видеоруководство](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial), чтобы узнать больше и начать разрабатывать более безопасные субграфы. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. From 3adb39c18cfd8f00fa8ad56aca33d4432dc579b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:46 -0500 Subject: [PATCH 0705/1789] New translations subgraph-uncrashable.mdx (Swedish) --- .../sv/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/sv/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/sv/subgraphs/cookbook/subgraph-uncrashable.mdx index ce8e87ecfd46..9b0652bf1a85 100644 --- a/website/src/pages/sv/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: Säker subgraf kodgenerator --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## Varför integrera med Subgraf Uncrashable? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. 
Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Key Features** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - Ramverket innehåller också ett sätt (via konfigurationsfilen) att skapa anpassade, men säkra, sätterfunktioner för grupper av entitetsvariabler. På så sätt är det omöjligt för användaren att ladda/använda en inaktuell grafenhet och det är också omöjligt att glömma att spara eller ställa in en variabel som krävs av funktionen. -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. Subgraph Uncrashable kan köras som en valfri flagga med kommandot Graph CLI codegen. @@ -26,4 +26,4 @@ Subgraph Uncrashable kan köras som en valfri flagga med kommandot Graph CLI cod graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. 
From 1b98e27b050821b69c46c36a9fd73a90a2ca67b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:47 -0500 Subject: [PATCH 0706/1789] New translations subgraph-uncrashable.mdx (Turkish) --- .../tr/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/tr/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/tr/subgraphs/cookbook/subgraph-uncrashable.mdx index bd935dd33689..ba461201d71f 100644 --- a/website/src/pages/tr/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/tr/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: Güvenli Subgraph Kod Oluşturucu --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) (çökmeyen subgraph), bir projenin graphql şemasından bir dizi yardımcı fonksiyon üreten bir kod oluşturma aracıdır. `subgraph`inizdeki varlıklarla tüm etkileşimlerin tamamen güvenli ve tutarlı olmasını sağlar. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## Neden Subgraph Uncrashable'ı entegre etmelisiniz? -- **Sürekli Çalışırlık**. Yanlış yönetilen varlıklar subgraph'lerin çökmesine neden olabilir. Bu da The Graph'e bağımlı olan projeler için işleri aksatabilir. Subgraph'lerinizi "çökmez" hale getirmek ve iş sürekliliğini sağlamak için yardımcı fonksiyonları ayarlayın. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Tamamen Güvenli**. `subgraph` geliştirme sırasında yaygın görülen sorunlar, tanımsız varlıkların yüklenmesi, varlıkların tüm değerlerinin ayarlanmaması veya ilklendirilmemesi, varlıkların yüklenmemesi ve kaydedilmemesidir. Varlıklarla tüm etkileşimlerin çok az olduğundan emin olun. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **Kullanıcı Tarafından Yapılandırılabilir** Varsayılan değerleri ayarlayın ve projenizin ihtiyaçlarına uygun güvenlik kontrol seviyesini yapılandırın. `Subgraph` mantığındaki bir ihlali gösteren uyarı günlükleri kaydedilir ve verilerin doğruluğunu sağlamak için sorunun giderilmesine yardımcı olunur. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Ana Özellikler** -- Kod oluşturma aracı, **tüm** subgraph türlerini destekler ve kullanıcıların değerlerde makul varsayılanlar ayarlamaları için yapılandırılabilir. Kod oluşturma, kullanıcıların belirtimine uygun yardımcı fonksiyonlar oluşturmak için bu yapılandırmayı kullanacaktır. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. 
- Framework ayrıca unsur değişkenleri grupları için özel, ancak güvenli ayarlayıcı fonksiyonları oluşturmanın bir yolunu (yapılandırma dosyası aracılığıyla) içerir. Bu sayede, kullanıcının eski bir graph unsurunu yüklemesi/kullanması ve ayrıca fonksiyonun gerektirdiği bir değişkeni kaydetmeyi veya ayarlamayı unutması imkansız hale gelir. -- Uyarı günlükleri, subgraph mantığında bir ihlalin nerede olduğunu gösteren günlükler olarak kaydedilir ve veri doğruluğunu sağlamak için sorunun düzeltilmesine yardımcı olur. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. Subgraph Uncrashable, Graph CLI codegen komutu kullanılarak isteğe bağlı bir bayrak olarak çalıştırılabilir. @@ -26,4 +26,4 @@ Subgraph Uncrashable, Graph CLI codegen komutu kullanılarak isteğe bağlı bir graph codegen -u [options] [] ``` -Daha fazla bilgi edinmek ve daha güvenli subgraph'ler geliştirmeye başlamak için [Subgraph Uncrashable dokümantasyonuna](https://float-capital.github.io/float-subgraph-uncrashable/docs/) göz atın veya bu [video rehberini](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) izleyin. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. From ced028ee36fad094cb729d575ce41731b245134a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:48 -0500 Subject: [PATCH 0707/1789] New translations subgraph-uncrashable.mdx (Ukrainian) --- .../uk/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/uk/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/uk/subgraphs/cookbook/subgraph-uncrashable.mdx index 0cc91a0fa2c3..a08e2a7ad8c9 100644 --- a/website/src/pages/uk/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: Safe Subgraph Code Generator --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## Why integrate with Subgraph Uncrashable? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. 
Ensure all interactions with entities are completely atomic. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Key Features** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - The framework also includes a way (via the config file) to create custom, but safe, setter functions for groups of entity variables. This way it is impossible for the user to load/use a stale graph entity and it is also impossible to forget to save or set a variable that is required by the function. -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen command. @@ -26,4 +26,4 @@ Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. 
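The safe-helper pattern that these subgraph-uncrashable.mdx patches describe in prose can be pictured with a small, hand-written sketch. The snippet below is **not** output of Subgraph Uncrashable itself — the tool derives its own helper names and default values from your GraphQL schema and config file — but it illustrates the documented behaviour: never return an undefined entity, fall back to sane defaults, and log a warning where Subgraph logic is breached. The `Token` entity and its `owner`/`balance` fields are assumed purely for illustration; only the `@graphprotocol/graph-ts` calls (`load`, `save`, `log.warning`, `BigInt.fromI32`) are real APIs.

```typescript
// Hypothetical sketch of an "uncrashable" load-or-initialize helper for a
// Subgraph mapping written in AssemblyScript. `Token` and its fields are
// illustrative; the real tool generates equivalent helpers from your schema
// and config file.
import { BigInt, log } from "@graphprotocol/graph-ts"
import { Token } from "../generated/schema"

export function safeLoadToken(id: string): Token {
  let existing = Token.load(id)
  if (existing != null) {
    return existing
  }

  // Breach of expected Subgraph logic: record a warning instead of crashing,
  // then initialize the entity with safe default values.
  log.warning("Token {} was not found; creating it with default values", [id])

  let created = new Token(id)
  created.owner = "0x0000000000000000000000000000000000000000"
  created.balance = BigInt.fromI32(0)
  created.save()
  return created
}
```

A handler would then call `safeLoadToken(...)` instead of `Token.load(...)` directly, so a missing entity degrades into a warning log rather than a crashed Subgraph — which is the continuity guarantee these pages describe.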
From ad65b8712c55f943a691336c8092ffdd503394cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:49 -0500 Subject: [PATCH 0708/1789] New translations subgraph-uncrashable.mdx (Chinese Simplified) --- .../zh/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/zh/subgraphs/cookbook/subgraph-uncrashable.mdx index 959ec7b532cc..c80db1886eee 100644 --- a/website/src/pages/zh/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: 安全子图代码生成器 --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## 为什么要整合子图使其不崩溃? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Key Features** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. 
- 该框架还包括一种方法(通过配置文件) 为实体变量组创建自定义但安全的 setter 函数。这样,用户就不可能加载/使用过时的图形实体,也不可能忘记保存或设置函数所需的变量。 -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. 使用 Graph CLI codegen 命令,Subgraph Uncrashable 可以作为一个可选标志运行。 @@ -26,4 +26,4 @@ title: 安全子图代码生成器 graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. From 07e24df847d563923ea3b3d584c8bd9e6e6e428b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:50 -0500 Subject: [PATCH 0709/1789] New translations subgraph-uncrashable.mdx (Urdu (Pakistan)) --- .../ur/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ur/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/ur/subgraphs/cookbook/subgraph-uncrashable.mdx index 1ce722e5639d..f75738833744 100644 --- a/website/src/pages/ur/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: محفوظ سب گراف کوڈ جنریٹر --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## سب گراف ان کریش ایبل کے ساتھ کیوں ضم کیا جائے؟ -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. 
-- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Key Features** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - فریم ورک میں ہستی متغیرات کے گروپس کے لیے حسب ضرورت، لیکن محفوظ، سیٹر فنکشنز بنانے کا ایک طریقہ (کنفگ فائل کے ذریعے) بھی شامل ہے۔ اس طرح صارف کے لیے کسی باسی گراف ہستی کو لوڈ/استعمال کرنا ناممکن ہے اور فنکشن کے لیے مطلوبہ متغیر کو محفوظ کرنا یا سیٹ کرنا بھولنا بھی ناممکن ہے. -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. گراف CLI کوڈجن کمانڈ کا استعمال کرتے ہوئے سب گراف ان کریش ایبل کو اختیاری پرچم کے طور پر چلایا جا سکتا ہے. @@ -26,4 +26,4 @@ title: محفوظ سب گراف کوڈ جنریٹر graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. From 5671cd8b272fc13df50fb6c93ba7995362a41fa1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:51 -0500 Subject: [PATCH 0710/1789] New translations subgraph-uncrashable.mdx (Vietnamese) --- .../vi/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/vi/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/vi/subgraphs/cookbook/subgraph-uncrashable.mdx index 0cc91a0fa2c3..a08e2a7ad8c9 100644 --- a/website/src/pages/vi/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: Safe Subgraph Code Generator --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. 
It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## Why integrate with Subgraph Uncrashable? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Key Features** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - The framework also includes a way (via the config file) to create custom, but safe, setter functions for groups of entity variables. This way it is impossible for the user to load/use a stale graph entity and it is also impossible to forget to save or set a variable that is required by the function. -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen command. @@ -26,4 +26,4 @@ Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. 
From 6a696a0e9ed65f464b16d578868bd78329a1566b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:52 -0500 Subject: [PATCH 0711/1789] New translations subgraph-uncrashable.mdx (Marathi) --- .../mr/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/mr/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/mr/subgraphs/cookbook/subgraph-uncrashable.mdx index 9a7e3d9f008e..55cf87cd0af1 100644 --- a/website/src/pages/mr/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: सुरक्षित सबग्राफ कोड जनरेटर --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## Subgraph Uncrashable सह समाकलित का? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Key Features** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - फ्रेमवर्कमध्ये एंटिटी व्हेरिएबल्सच्या गटांसाठी सानुकूल, परंतु सुरक्षित, सेटर फंक्शन्स तयार करण्याचा मार्ग (कॉन्फिग फाइलद्वारे) देखील समाविष्ट आहे. 
अशा प्रकारे वापरकर्त्याला जुना आलेख घटक लोड करणे/वापरणे अशक्य आहे आणि फंक्शनसाठी आवश्यक असलेले व्हेरिएबल सेव्ह करणे किंवा सेट करणे विसरणे देखील अशक्य आहे. -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. ग्राफ CLI codegen कमांड वापरून Subgraph Uncrashable हा पर्यायी ध्वज म्हणून चालवला जाऊ शकतो. @@ -26,4 +26,4 @@ title: सुरक्षित सबग्राफ कोड जनरेट graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. From 595448609cdd3c8d640924736651b09135e28d0e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:53 -0500 Subject: [PATCH 0712/1789] New translations subgraph-uncrashable.mdx (Hindi) --- .../hi/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/hi/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/hi/subgraphs/cookbook/subgraph-uncrashable.mdx index ace90495aef8..f53c976a796b 100644 --- a/website/src/pages/hi/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/hi/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,23 +2,23 @@ title: सुरक्षित सबग्राफ कोड जेनरेटर --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. ## सबग्राफ अनक्रैशेबल के साथ एकीकृत क्यों करें? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. 
Ensure all interactions with entities are completely atomic. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. **Key Features** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - फ्रेमवर्क में इकाई वैरिएबल के समूहों के लिए कस्टम, लेकिन सुरक्षित, सेटर फ़ंक्शन बनाने का एक तरीका (कॉन्फिग फ़ाइल के माध्यम से) भी शामिल है। इस तरह उपयोगकर्ता के लिए एक पुरानी ग्राफ़ इकाई को लोड/उपयोग करना असंभव है और फ़ंक्शन द्वारा आवश्यक वैरिएबल को सहेजना या सेट करना भूलना भी असंभव है। -- चेतावनी लॉग को लॉग के रूप में रिकॉर्ड किया जाता है, जो यह इंगित करता है कि Subgraph लॉजिक में कहां उल्लंघन हो रहा है, ताकि समस्या को ठीक किया जा सके और डेटा की सटीकता सुनिश्चित हो सके। +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. सबग्राफ अनक्रैशेबल को ग्राफ़ CLI codegen कमांड का उपयोग करके एक वैकल्पिक फ़्लैग के रूप में चलाया जा सकता है। @@ -26,4 +26,4 @@ title: सुरक्षित सबग्राफ कोड जेनरे graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. From 210c641ee05717f27a0b9320931ada8d70425e34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:54 -0500 Subject: [PATCH 0713/1789] New translations subgraph-uncrashable.mdx (Swahili) --- .../cookbook/subgraph-uncrashable.mdx | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/cookbook/subgraph-uncrashable.mdx diff --git a/website/src/pages/sw/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/sw/subgraphs/cookbook/subgraph-uncrashable.mdx new file mode 100644 index 000000000000..a08e2a7ad8c9 --- /dev/null +++ b/website/src/pages/sw/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -0,0 +1,29 @@ +--- +title: Safe Subgraph Code Generator +--- + +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your Subgraph are completely safe and consistent. + +## Why integrate with Subgraph Uncrashable? 
+ +- **Continuous Uptime**. Mishandled entities may cause Subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your Subgraphs “uncrashable” and ensure business continuity. + +- **Completely Safe**. Common problems seen in Subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. + +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. + +**Key Features** + +- The code generation tool accommodates **all** Subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. + +- The framework also includes a way (via the config file) to create custom, but safe, setter functions for groups of entity variables. This way it is impossible for the user to load/use a stale graph entity and it is also impossible to forget to save or set a variable that is required by the function. + +- Warning logs are recorded as logs indicating where there is a breach of Subgraph logic to help patch the issue to ensure data accuracy. + +Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen command. + +```sh +graph codegen -u [options] [] +``` + +Visit the [Subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer Subgraphs. From 8935aff3e277b27d20af2e8b95c81c74ea899504 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:55 -0500 Subject: [PATCH 0714/1789] New translations transfer-to-the-graph.mdx (Romanian) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/ro/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/ro/subgraphs/cookbook/transfer-to-the-graph.mdx index 194deb018404..0ac02c8234a4 100644 --- a/website/src/pages/ro/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/ro/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Tranfer to The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Benefits of Switching to The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. +- Use the same Subgraph that your apps already use with zero-downtime migration. - Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. 
## Upgrade Your Subgraph to The Graph in 3 Easy Steps @@ -21,9 +21,9 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n ### Create a Subgraph in Subgraph Studio - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -37,7 +37,7 @@ Using [npm](https://www.npmjs.com/): npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Deploy Your Subgraph to Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. In The Graph CLI, run the following command: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Publish Your Subgraph to The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Query Your Subgraph -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. 
#### Example -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Query URL](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the ### Monitor Subgraph Status -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). ### Additional Resources -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). From bc437b68c86c0d7a4a25c7903d5290a974167564 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:56 -0500 Subject: [PATCH 0715/1789] New translations transfer-to-the-graph.mdx (French) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/fr/subgraphs/cookbook/transfer-to-the-graph.mdx index d34a88327c64..7cb795a56ce1 100644 --- a/website/src/pages/fr/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Transférer vers The Graph --- -Mettez rapidement à jour vos subgraphs depuis n'importe quelle plateforme vers [le réseau décentralisé de The Graph](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Avantages du passage à The Graph -- Utilisez le même subgraph que vos applications utilisent déjà avec une migration sans interruption de service. +- Use the same Subgraph that your apps already use with zero-downtime migration. - Améliorez la fiabilité grâce à un réseau mondial pris en charge par plus de 100 Indexers. -- Bénéficiez d’un support ultra-rapide pour vos subgraphs 24/7, avec une équipe d’ingénieurs de garde. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Mettez à jour votre Subgraph vers The Graph en 3 étapes simples @@ -21,9 +21,9 @@ Mettez rapidement à jour vos subgraphs depuis n'importe quelle plateforme vers ### Créer un subgraph dans Subgraph Studio - Accédez à [Subgraph Studio](https://thegraph.com/studio/) et connectez votre portefeuille. -- Cliquez sur « Créer un subgraph ». 
Il est recommandé de nommer le subgraph en majuscule : « Nom du subgraph Nom de la chaîne ». +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Installer Graph CLI @@ -37,7 +37,7 @@ Utilisation de [npm](https://www.npmjs.com/) : npm install -g @graphprotocol/graph-cli@latest ``` -Utilisez la commande suivante pour créer un subgraph dans Studio en utilisant CLI : +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Déployez votre Subgraph sur Studio -Si vous avez votre code source, vous pouvez facilement le déployer sur Studio. Si vous ne l'avez pas, voici un moyen rapide de déployer votre subgraph. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. Dans Graph CLI, exécutez la commande suivante : @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Note:**: Chaque subgraph a un hash IPFS (ID de déploiement), qui ressemble à ceci : "Qmasdfad...". Pour déployer, utilisez simplement ce **hash IPFS**. Vous serez invité à entrer une version (par exemple, v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Publier votre Subgraph sur The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Interroger votre Subgraph -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. #### Exemple -[Subgraph Ethereum CryptoPunks](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) par Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![L'URL de requête](/img/cryptopunks-screenshot-transfer.png) -L'URL de requête pour ce subgraph est : +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**votre-propre-clé-Api**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ Vous pouvez créer des clés API dans Subgraph Studio sous le menu "API Keys" en ### Surveiller l'état du Subgraph -Une fois que vous avez mis à jour, vous pouvez accéder et gérer vos subgraphs dans [Subgraph Studio](https://thegraph.com/studio/) et explorer tous les subgraphs dans [The Graph Explorer](https://thegraph.com/networks/). 
+Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). ### Ressources supplémentaires -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- Pour explorer toutes les façons d'optimiser et de personnaliser votre subgraph pour de meilleures performances, lisez plus sur [la création d'un subgraph ici](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). From 129c3457b2eb111e8d88d6c109706efcd5a2d63e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:57 -0500 Subject: [PATCH 0716/1789] New translations transfer-to-the-graph.mdx (Spanish) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/es/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/es/subgraphs/cookbook/transfer-to-the-graph.mdx index 339032915f35..b38e3c26d8d5 100644 --- a/website/src/pages/es/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/es/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Tranfer to The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Benefits of Switching to The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. +- Use the same Subgraph that your apps already use with zero-downtime migration. - Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Upgrade Your Subgraph to The Graph in 3 Easy Steps @@ -21,9 +21,9 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n ### Create a Subgraph in Subgraph Studio - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -37,7 +37,7 @@ Using [npm](https://www.npmjs.com/): npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Deploy Your Subgraph to Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. 
+If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. In The Graph CLI, run the following command: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Publish Your Subgraph to The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Query Your Subgraph -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. #### Ejemplo -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Query URL](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the ### Monitor Subgraph Status -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). ### Recursos Adicionales -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). 
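The transfer-to-the-graph.mdx pages patched here stop at the query URL, so a minimal querying sketch may help make the last step concrete. The TypeScript snippet below assumes a fetch-capable runtime (Node 18+ or a browser); the API key and deployment ID are placeholders following the example URL format shown in these pages, and the `_meta { block { number } }` selection is used only as a generic graph-node metadata field — replace it with entity queries from your own Subgraph's schema.

```typescript
// Minimal sketch: POSTing a GraphQL query to a published Subgraph's gateway URL.
// API_KEY and SUBGRAPH_ID are placeholders — copy the real values from
// Subgraph Studio ("API Keys") and the Subgraph's Explorer page.
const API_KEY = "your-own-api-key"
const SUBGRAPH_ID = "HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK"
const url = `https://gateway-arbitrum.network.thegraph.com/api/${API_KEY}/subgraphs/id/${SUBGRAPH_ID}`

// `_meta` is a graph-node metadata field; swap in queries against the
// Subgraph's own entities for real data.
const query = `{ _meta { block { number } } }`

async function main(): Promise<void> {
  const response = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query }),
  })
  const result = await response.json()
  console.log(JSON.stringify(result, null, 2))
}

main().catch(console.error)
```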
From 7ad9cfa3f8f57c87875c92c178a36caff01014a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:58 -0500 Subject: [PATCH 0717/1789] New translations transfer-to-the-graph.mdx (Arabic) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/ar/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/ar/subgraphs/cookbook/transfer-to-the-graph.mdx index f713ec3a5e76..e110436fe6db 100644 --- a/website/src/pages/ar/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Tranfer to The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Benefits of Switching to The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. +- Use the same Subgraph that your apps already use with zero-downtime migration. - Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Upgrade Your Subgraph to The Graph in 3 Easy Steps @@ -21,9 +21,9 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n ### Create a Subgraph in Subgraph Studio - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -37,7 +37,7 @@ Using [npm](https://www.npmjs.com/): npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Deploy Your Subgraph to Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. In The Graph CLI, run the following command: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Publish Your Subgraph to The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Query Your Subgraph -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. 
To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. #### Example -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Query URL](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the ### Monitor Subgraph Status -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). ### مصادر إضافية -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). From d882747076338f8fe7725f37b236fc2c2e0cbe58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:20:59 -0500 Subject: [PATCH 0718/1789] New translations transfer-to-the-graph.mdx (Czech) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/cs/subgraphs/cookbook/transfer-to-the-graph.mdx index 3e4f8eee8ccf..09e25fe3f012 100644 --- a/website/src/pages/cs/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Tranfer to The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Benefits of Switching to The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. +- Use the same Subgraph that your apps already use with zero-downtime migration. 
- Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Upgrade Your Subgraph to The Graph in 3 Easy Steps @@ -21,9 +21,9 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n ### Create a Subgraph in Subgraph Studio - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -37,7 +37,7 @@ Použitím [npm](https://www.npmjs.com/): npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Deploy Your Subgraph to Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. In The Graph CLI, run the following command: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Publish Your Subgraph to The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Query Your Subgraph -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. 
#### Příklad -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Query URL](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the ### Monitor Subgraph Status -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). ### Další zdroje -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). From 24076e1b9e134ba54aaede33be2fb577f38d3e8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:00 -0500 Subject: [PATCH 0719/1789] New translations transfer-to-the-graph.mdx (German) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/de/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/de/subgraphs/cookbook/transfer-to-the-graph.mdx index a97a3c618c03..9af9fbf0b38c 100644 --- a/website/src/pages/de/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/de/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Tranfer to The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Benefits of Switching to The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. +- Use the same Subgraph that your apps already use with zero-downtime migration. - Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Upgrade Your Subgraph to The Graph in 3 Easy Steps @@ -21,9 +21,9 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n ### Create a Subgraph in Subgraph Studio - Gehen Sie zu [Subgraph Studio] (https://thegraph.com/studio/) und verbinden Sie Ihre Wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Click "Create a Subgraph". 
It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -37,7 +37,7 @@ Verwendung von [npm](https://www.npmjs.com/): npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Deploy Your Subgraph to Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. In The Graph CLI, run the following command: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Publish Your Subgraph to The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Query Your Subgraph -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. #### Beispiel -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Query URL](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the ### Monitor Subgraph Status -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). 
### Zusätzliche Ressourcen -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). From d7ed234eb078f41d759fa91556ec5aaad217cef6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:02 -0500 Subject: [PATCH 0720/1789] New translations transfer-to-the-graph.mdx (Italian) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/it/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/it/subgraphs/cookbook/transfer-to-the-graph.mdx index 4c435d24f56c..c442b7245e8a 100644 --- a/website/src/pages/it/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/it/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Tranfer to The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Benefits of Switching to The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. +- Use the same Subgraph that your apps already use with zero-downtime migration. - Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Upgrade Your Subgraph to The Graph in 3 Easy Steps @@ -21,9 +21,9 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n ### Create a Subgraph in Subgraph Studio - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -37,7 +37,7 @@ Using [npm](https://www.npmjs.com/): npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Deploy Your Subgraph to Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. 
In The Graph CLI, run the following command: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Publish Your Subgraph to The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Query Your Subgraph -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. #### Esempio -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Query URL](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the ### Monitor Subgraph Status -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). ### Additional Resources -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). 
From 4f5339e341e4ee25aee1d19637e9828cd7bda2d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:03 -0500 Subject: [PATCH 0721/1789] New translations transfer-to-the-graph.mdx (Japanese) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/ja/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/ja/subgraphs/cookbook/transfer-to-the-graph.mdx index 6ef52284a5f5..0e5b652ddf1b 100644 --- a/website/src/pages/ja/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Tranfer to The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Benefits of Switching to The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. +- Use the same Subgraph that your apps already use with zero-downtime migration. - Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Upgrade Your Subgraph to The Graph in 3 Easy Steps @@ -21,9 +21,9 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n ### Create a Subgraph in Subgraph Studio - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -37,7 +37,7 @@ Using [npm](https://www.npmjs.com/): npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Deploy Your Subgraph to Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. In The Graph CLI, run the following command: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Publish Your Subgraph to The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Query Your Subgraph -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. 
To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. #### 例 -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Query URL](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the ### Monitor Subgraph Status -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). ### その他のリソース -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). From fd642e0eae74b5dc35ccd68a5c2b89d166a72ce0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:04 -0500 Subject: [PATCH 0722/1789] New translations transfer-to-the-graph.mdx (Korean) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/ko/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/ko/subgraphs/cookbook/transfer-to-the-graph.mdx index 194deb018404..0ac02c8234a4 100644 --- a/website/src/pages/ko/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/ko/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Tranfer to The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Benefits of Switching to The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. +- Use the same Subgraph that your apps already use with zero-downtime migration. 
- Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Upgrade Your Subgraph to The Graph in 3 Easy Steps @@ -21,9 +21,9 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n ### Create a Subgraph in Subgraph Studio - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -37,7 +37,7 @@ Using [npm](https://www.npmjs.com/): npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Deploy Your Subgraph to Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. In The Graph CLI, run the following command: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Publish Your Subgraph to The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Query Your Subgraph -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. 
#### Example -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Query URL](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the ### Monitor Subgraph Status -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). ### Additional Resources -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). From e44d3e8429327aac6a463788507481884a4242fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:05 -0500 Subject: [PATCH 0723/1789] New translations transfer-to-the-graph.mdx (Dutch) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/nl/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/nl/subgraphs/cookbook/transfer-to-the-graph.mdx index 194deb018404..0ac02c8234a4 100644 --- a/website/src/pages/nl/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/nl/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Tranfer to The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Benefits of Switching to The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. +- Use the same Subgraph that your apps already use with zero-downtime migration. - Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Upgrade Your Subgraph to The Graph in 3 Easy Steps @@ -21,9 +21,9 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n ### Create a Subgraph in Subgraph Studio - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". 
-> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -37,7 +37,7 @@ Using [npm](https://www.npmjs.com/): npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Deploy Your Subgraph to Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. In The Graph CLI, run the following command: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Publish Your Subgraph to The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Query Your Subgraph -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. #### Example -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Query URL](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the ### Monitor Subgraph Status -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). ### Additional Resources -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). 
-- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). From 360018aeed35b4718e8e8e1509a03eacea0e606e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:06 -0500 Subject: [PATCH 0724/1789] New translations transfer-to-the-graph.mdx (Polish) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/pl/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/pl/subgraphs/cookbook/transfer-to-the-graph.mdx index 194deb018404..0ac02c8234a4 100644 --- a/website/src/pages/pl/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/pl/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Tranfer to The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Benefits of Switching to The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. +- Use the same Subgraph that your apps already use with zero-downtime migration. - Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Upgrade Your Subgraph to The Graph in 3 Easy Steps @@ -21,9 +21,9 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n ### Create a Subgraph in Subgraph Studio - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -37,7 +37,7 @@ Using [npm](https://www.npmjs.com/): npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Deploy Your Subgraph to Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. In The Graph CLI, run the following command: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. 
You’ll be prompted to enter a version (e.g., v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Publish Your Subgraph to The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Query Your Subgraph -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. #### Example -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Query URL](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the ### Monitor Subgraph Status -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). ### Additional Resources -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). 
From 903115cf58a49f6f6c3e3b63e67044cae78c45d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:07 -0500 Subject: [PATCH 0725/1789] New translations transfer-to-the-graph.mdx (Portuguese) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/pt/subgraphs/cookbook/transfer-to-the-graph.mdx index e5ad802a2941..c2e44cbec023 100644 --- a/website/src/pages/pt/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Transfira-se para The Graph --- -Migre rapidamente os seus subgraphs, de qualquer plataforma para a [rede descentralizada do The Graph](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Vantagens de Trocar para The Graph -- Use o mesmo subgraph que os seus aplicativos já usam, com migração sem tempo de ócio. +- Use the same Subgraph that your apps already use with zero-downtime migration. - Aumenta a confiabilidade de uma rede global mantida por mais de 100 Indexadores. -- Receba suporte rápido para subgraphs, com uma equipa de engenharia de plantão disponível a todas as horas. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Atualize o Seu Subgraph para The Graph em 3 Etapas Fáceis @@ -21,9 +21,9 @@ Migre rapidamente os seus subgraphs, de qualquer plataforma para a [rede descent ### Como Criar um Subgraph no Subgraph Studio - Entre no [Subgraph Studio](https://thegraph.com/studio/) e conecte a sua carteira de criptomoedas. -- Clique em "Create a Subgraph" ("Criar um Subgraph"). É recomendado nomear o subgraph em caixa de título: por exemplo, "Nome De Subgraph Nome da Chain". +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Observação: após a edição, o nome do subgraph poderá ser editado, mas isto sempre exigirá uma ação on-chain sempre, então pense bem no nome que irá dar. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Instale a Graph CLI @@ -37,7 +37,7 @@ Uso de [npm](https://www.npmjs.com/): npm install -g @graphprotocol/graph-cli@latest ``` -Use o comando a seguir para criar um subgraph no Studio com a CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Implante o Seu Subgraph no Studio -Se tiver o seu código-fonte, pode facilmente implantá-lo no Studio. Se não o tiver, aqui está uma maneira rápida de implantar o seu subgraph. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. Na The Graph CLI, execute o seguinte comando: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Observação:** Cada subgraph tem um hash IPFS (ID de Implantação), que se parece com isto: "Qmasdfad...". Para implantar, basta usar este **hash IPFS**. Aparecerá uma solicitação de versão (por exemplo, v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. 
Edite o Seu Subgraph na The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Faça um Query -> Para atrair cerca de 3 indexadores para fazer queries no seu subgraph, recomendamos curar pelo menos 3.000 GRT. Para saber mais sobre a curadoria, leia sobre [Curadoria](/resources/roles/curating/) no The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -Dá para começar a [fazer queries](/subgraphs/querying/introduction/) em qualquer subgraph enviando um query GraphQL para o ponto final da URL de query do subgraph, localizado na parte superior da página do Explorer no Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. #### Exemplo -[Subgraph: CryptoPunks Ethereum](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) por Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![URL de Query](/img/cryptopunks-screenshot-transfer.png) -A URL de queries para este subgraph é: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**sua-chave-de-api**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ Agora, você só precisa preencher **sua própria chave de API** para começar a ### Como Monitorar o Estado do Seu Subgraph -Após a atualização, poderá acessar e gerir os seus subgraphs no [Subgraph Studio](https://thegraph.com/studio/) e explorar todos os subgraphs no [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). ### Outros Recursos -- Para criar e editar um novo subgraph, veja o [Guia de Início Rápido](/subgraphs/quick-start/). -- Para explorar todas as maneiras de otimizar e personalizar o seu subgraph para melhor desempenho, leia mais sobre [como criar um subgraph aqui](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). From 4a30e35917746ec9a8e0787e1ad37ddf8e3536be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:08 -0500 Subject: [PATCH 0726/1789] New translations transfer-to-the-graph.mdx (Russian) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/ru/subgraphs/cookbook/transfer-to-the-graph.mdx index 570aab81debc..3ed49f67eba1 100644 --- a/website/src/pages/ru/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Перенос в The Graph --- -Быстро обновите свои субграфы с любой платформы до [децентрализованной сети The Graph](https://thegraph.com/networks/). 
+Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Преимущества перехода на The Graph -- Используйте тот же субграф, который уже используют ваши приложения, с миграцией без времени простоя. +- Use the same Subgraph that your apps already use with zero-downtime migration. - Повышайте надежность благодаря глобальной сети, поддерживаемой более чем 100 индексаторами. -- Получайте молниеносную поддержку для субграфов 24/7 от дежурной команды инженеров. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Обновите свой субграф до The Graph за 3 простых шага @@ -21,9 +21,9 @@ title: Перенос в The Graph ### Создайте субграф в Subgraph Studio - Перейдите в [Subgraph Studio](https://thegraph.com/studio/) и подключите свой кошелек. -- Нажмите "Создать субграф". Рекомендуется называть субграф с использованием Заглавного регистра: "Subgraph Name Chain Name". +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Примечание: после публикации имя субграфа будет доступно для редактирования, но для этого каждый раз потребуется действие на он-чейне, поэтому выберите подходящее имя сразу. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Установите Graph CLI @@ -37,7 +37,7 @@ title: Перенос в The Graph npm install -g @graphprotocol/graph-cli@latest ``` -Используйте следующую команду для создания субграфа в Studio с помощью CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Разверните свой субграф в Studio -Если у Вас есть исходный код, Вы можете с легкостью развернуть его в Studio. Если его нет, вот быстрый способ развернуть Ваш субграф. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. В Graph CLI выполните следующую команду: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Примечание:** Каждый субграф имеет хэш IPFS (идентификатор развертывания), который выглядит так: "Qmasdfad...". Для развертывания просто используйте этот **IPFS хэш**. Вам будет предложено ввести версию (например, v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Опубликуйте свой субграф в The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Запросите Ваш Субграф -> Для того чтобы привлечь около 3 индексаторов для запроса Вашего субграфа, рекомендуется зафиксировать как минимум 3000 GRT. Чтобы узнать больше о кураторстве, ознакомьтесь с разделом [Кураторство](/resources/roles/curating/) на платформе The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -Вы можете начать [запрашивать](/subgraphs/querying/introduction/) любой субграф, отправив запрос GraphQL на конечную точку URL-адреса его запроса, которая расположена в верхней части страницы его эксплорера в Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. 
#### Пример -[Субграф CryptoPunks на Ethereum](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) от Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![URL запроса](/img/cryptopunks-screenshot-transfer.png) -URL запроса для этого субграфа: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**Ваш-api-ключ**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ https://gateway-arbitrum.network.thegraph.com/api/`**Ваш-api-ключ**`/subg ### Мониторинг статуса субграфа -После обновления Вы сможете получить доступ к своим субграфам и управлять ими в [Subgraph Studio](https://thegraph.com/studio/) и исследовать все субграфы в [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). ### Дополнительные ресурсы -- Чтобы быстро создать и опубликовать новый субграф, ознакомьтесь с [Руководством по быстрому старту](/subgraphs/quick-start/). -- Чтобы исследовать все способы оптимизации и настройки своего субграфа для улучшения производительности, читайте больше о [создании субграфа здесь](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). From d7b86b8e135380f9156df7df9f02266eab56d18d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:09 -0500 Subject: [PATCH 0727/1789] New translations transfer-to-the-graph.mdx (Swedish) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/sv/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/sv/subgraphs/cookbook/transfer-to-the-graph.mdx index f06ed1722258..8adf5dfd2521 100644 --- a/website/src/pages/sv/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Tranfer to The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Benefits of Switching to The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. +- Use the same Subgraph that your apps already use with zero-downtime migration. - Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Upgrade Your Subgraph to The Graph in 3 Easy Steps @@ -21,9 +21,9 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n ### Create a Subgraph in Subgraph Studio - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Click "Create a Subgraph". 
It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -37,7 +37,7 @@ Using [npm](https://www.npmjs.com/): npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Deploy Your Subgraph to Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. In The Graph CLI, run the following command: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Publish Your Subgraph to The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Query Your Subgraph -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. #### Exempel -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Query URL](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the ### Monitor Subgraph Status -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). 
### Ytterligare resurser -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). From ac9a176e0ded6152d60e9398be15c2c0022fe9ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:10 -0500 Subject: [PATCH 0728/1789] New translations transfer-to-the-graph.mdx (Turkish) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/tr/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/tr/subgraphs/cookbook/transfer-to-the-graph.mdx index a02f136958c2..12defb581449 100644 --- a/website/src/pages/tr/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/tr/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: The Graph'e Transfer --- -Subgraph'lerinizi herhangi bir platformdan hızlıca [The Graph'in merkezi olmayan ağına](https://thegraph.com/networks/) yükseltin. +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## The Graph'e Geçmenin Avantajları -- Uygulamalarınızın zaten kullandığı subgraph'i kesinti yaşamadan aynı şekilde kullanabilirsiniz. +- Use the same Subgraph that your apps already use with zero-downtime migration. - Yüzden fazla Endeksleyici tarafından desteklenip global bir ağdan gelen güvenilirliği artırabilirsiniz. -- Subgraph'ler için, her zaman yardıma hazır mühendislik ekibinden 7/24 yıldırım hızında destek alabilirsiniz. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Subgraph'inizi The Graph'e 3 Kolay Adımda Yükseltin @@ -21,9 +21,9 @@ Subgraph'lerinizi herhangi bir platformdan hızlıca [The Graph'in merkezi olmay ### Subgraph Studio'da Bir Subgraph Oluştur - [Subgraph Studio](https://thegraph.com/studio/)'ya gidin ve cüzdanınızı bağlayın. -- "Subgraph Oluştur" düğmesine tıklayın. Subgraph'in adını başlık formunda vermeniz önerilir: "Subgraph Adı Ağ Adı". +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Not: Yayımladıktan sonra subgraph ismi değiştirilebilir, ancak bunu yapmak her seferinde zincir üzerinde işlem gerektirir. Bu yüzden isim verirken iyi düşünün. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Graph CLI'ı Yükle @@ -37,7 +37,7 @@ Yerel makinenizde şu komutu çalıştırın: npm install -g @graphprotocol/graph-cli@latest ``` -CLI kullanarak Studio'da subgraph oluşturmak için aşağıdaki komutu kullanın: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Subgraph'inizi Studio'ya Dağıtın -Kaynak kodunuz elinizdeyse kodunuzu Studio'ya kolayca dağıtabilirsiniz. Kaynak kodunuza sahip değilseniz, subgraph'inizi dağıtmanın hızlı yolunu aşağıda bulabilirsiniz. +If you have your source code, you can easily deploy it to Studio. 
If you don't have it, here's a quick way to deploy your Subgraph. The Graph CLI'de aşağıdaki komutu çalıştırın: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Not:** Her subgraph'in bir IPFS hash değeri (Dağıtım Kimliği) vardır ve bu şekilde görünür: "Qmasdfad...". Dağıtmak için bu **IPFS hash'ini** kullanmanız yeterlidir. Sizden bir versiyon girmeniz istenecektir (örneğin, v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Subgraph'inizi The Graph Ağı'nda Yayımlayın @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Subgraph'inizi Sorgulayın -> Subgraph'inizi sorgulamak için yaklaşık üç endeksleyici çekmek için, en az 3000 GRT ile kürasyon yapmanız önerilir. Kürasyon hakkında daha fazla bilgi için, The Graph üzerindeki [Kürasyon](/resources/roles/curating/) sayfasına göz atın. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -Herhangi bir subgraph'i [sorgulamaya](/subgraphs/querying/introduction/) başlamak için, bir GraphQL sorgusunu subgraph’in sorgu URL uç noktasına gönderebilirsiniz. Bu uç nokta Subgraph Studio'daki Gezgin sayfasının üst kısmında bulunur. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. #### Örnek -Messari tarafından hazırlanmış [CryptoPunks Ethereum subgraph'i](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK): +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Sorgu URL'si](/img/cryptopunks-screenshot-transfer.png) -Bu subgraph için sorgu URL'si: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**kendi-api-anahtarınız**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ API Anahtarlarını Subgraph Studio'da sayfanın üst kısmındaki “API Anahta ### Subgraph Durumunu İzle -Yükseltme yaptıktan sonra, [Subgraph Studio](https://thegraph.com/studio/) üzerinde subgraph'lerinize erişip onları yönetebilirsiniz. Ayrıca, [The Graph Gezgini](https://thegraph.com/networks/) içindeki tüm subgraph'leri burada keşfedebilirsiniz. +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). ### Ek Kaynaklar -- Hızlı bir şekilde yeni bir subgraph oluşturmak ve yayımlamak için [Hızlı Başlangıç](/subgraphs/quick-start/) bölümüne göz atın. -- Daha iyi bir performans için subgraph'inizi optimize etmenin ve özelleştirmenin tüm yollarını keşfetmek için [subgraph oluşturma](/developing/creating-a-subgraph/) hakkında daha fazla okuyun. +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). 
From 275ad21bc33f51c6a9b6a85807960d434453ff02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:11 -0500 Subject: [PATCH 0729/1789] New translations transfer-to-the-graph.mdx (Ukrainian) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/uk/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/uk/subgraphs/cookbook/transfer-to-the-graph.mdx index aed61c2c695b..5f6f0824769b 100644 --- a/website/src/pages/uk/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Tranfer to The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Benefits of Switching to The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. +- Use the same Subgraph that your apps already use with zero-downtime migration. - Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Upgrade Your Subgraph to The Graph in 3 Easy Steps @@ -21,9 +21,9 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n ### Create a Subgraph in Subgraph Studio - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -37,7 +37,7 @@ Using [npm](https://www.npmjs.com/): npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Deploy Your Subgraph to Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. In The Graph CLI, run the following command: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Publish Your Subgraph to The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Query Your Subgraph -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. 
To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. #### Example -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Query URL](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the ### Monitor Subgraph Status -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). ### Додаткові матеріали -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). From b388f3053a1fefaad7e9bcc39784f6f2b880cdda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:12 -0500 Subject: [PATCH 0730/1789] New translations transfer-to-the-graph.mdx (Chinese Simplified) --- .../cookbook/transfer-to-the-graph.mdx | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/zh/subgraphs/cookbook/transfer-to-the-graph.mdx index 497dc254ddf1..964f2303e057 100644 --- a/website/src/pages/zh/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Tranfer to The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Benefits of Switching to The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. +- Use the same Subgraph that your apps already use with zero-downtime migration. 
- Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Upgrade Your Subgraph to The Graph in 3 Easy Steps @@ -21,9 +21,9 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n ### Create a Subgraph in Subgraph Studio - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -31,13 +31,13 @@ You must have [Node.js](https://nodejs.org/) and a package manager of your choic On your local machine, run the following command: -Using [npm](https://www.npmjs.com/): +使用[npm](https://www.npmjs.com/): ```sh npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Deploy Your Subgraph to Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. In The Graph CLI, run the following command: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Publish Your Subgraph to The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Query Your Subgraph -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. 
#### 示例 -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Query URL](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the ### Monitor Subgraph Status -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). -### 其他资源 +### Additional Resources -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). From ff987520021034f1aaa33fd83a7935250dac8c2b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:13 -0500 Subject: [PATCH 0731/1789] New translations transfer-to-the-graph.mdx (Urdu (Pakistan)) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/ur/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/ur/subgraphs/cookbook/transfer-to-the-graph.mdx index 43fd50c14672..b63b2b15033e 100644 --- a/website/src/pages/ur/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Tranfer to The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Benefits of Switching to The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. +- Use the same Subgraph that your apps already use with zero-downtime migration. - Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Upgrade Your Subgraph to The Graph in 3 Easy Steps @@ -21,9 +21,9 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n ### Create a Subgraph in Subgraph Studio - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Click "Create a Subgraph". 
It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -37,7 +37,7 @@ On your local machine, run the following command: npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Deploy Your Subgraph to Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. In The Graph CLI, run the following command: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Publish Your Subgraph to The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Query Your Subgraph -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. #### مثال -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Query URL](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the ### Monitor Subgraph Status -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). 
### اضافی وسائل -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). From af1e42f7bc52026e5d2f3e8bbef80dac36bdb758 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:14 -0500 Subject: [PATCH 0732/1789] New translations transfer-to-the-graph.mdx (Vietnamese) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/vi/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/vi/subgraphs/cookbook/transfer-to-the-graph.mdx index 78493fe55b06..6fc96df5b83b 100644 --- a/website/src/pages/vi/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Tranfer to The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Benefits of Switching to The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. +- Use the same Subgraph that your apps already use with zero-downtime migration. - Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Upgrade Your Subgraph to The Graph in 3 Easy Steps @@ -21,9 +21,9 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n ### Create a Subgraph in Subgraph Studio - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -37,7 +37,7 @@ Using [npm](https://www.npmjs.com/): npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Deploy Your Subgraph to Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. 
In The Graph CLI, run the following command: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Publish Your Subgraph to The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Query Your Subgraph -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. #### Ví dụ -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Query URL](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the ### Monitor Subgraph Status -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). ### Additional Resources -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). 
From b7b5add5db23b4b7de59e42a19542ab57ad5851f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:15 -0500 Subject: [PATCH 0733/1789] New translations transfer-to-the-graph.mdx (Marathi) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/mr/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/mr/subgraphs/cookbook/transfer-to-the-graph.mdx index d31f9d8864b5..cd1570902483 100644 --- a/website/src/pages/mr/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: Tranfer to The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## Benefits of Switching to The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. +- Use the same Subgraph that your apps already use with zero-downtime migration. - Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## Upgrade Your Subgraph to The Graph in 3 Easy Steps @@ -21,9 +21,9 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n ### Create a Subgraph in Subgraph Studio - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -37,7 +37,7 @@ Using [npm](https://www.npmjs.com/): npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. Deploy Your Subgraph to Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. In The Graph CLI, run the following command: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. Publish Your Subgraph to The Graph Network @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### Query Your Subgraph -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. 
To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. #### उदाहरण -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Query URL](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the ### Monitor Subgraph Status -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). ### अतिरिक्त संसाधने -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). From 919c9e6b00831905ce531e318d8b7d0839b715c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:16 -0500 Subject: [PATCH 0734/1789] New translations transfer-to-the-graph.mdx (Hindi) --- .../cookbook/transfer-to-the-graph.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/hi/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/hi/subgraphs/cookbook/transfer-to-the-graph.mdx index ae5023b492a4..fa830f4adaa8 100644 --- a/website/src/pages/hi/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/hi/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -2,13 +2,13 @@ title: The Graph पर ट्रांसफर करें --- -अपने subgraphs को किसी भी प्लेटफ़ॉर्म से The Graph's decentralized network(https://thegraph.com/networks/) में जल्दी से अपग्रेड करें। +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). ## The Graph पर स्विच करने के लाभ -- आपके ऐप्स द्वारा पहले से उपयोग किए जा रहे वही subgraph को बिना किसी डाउनटाइम के माइग्रेशन के लिए उपयोग करें। +- Use the same Subgraph that your apps already use with zero-downtime migration. 
- 100+ Indexers द्वारा समर्थित एक वैश्विक नेटवर्क से विश्वसनीयता बढ़ाएं। -- सबग्राफ के लिए 24/7 तेज़ और तुरंत समर्थन प्राप्त करें, एक ऑन-कॉल इंजीनियरिंग टीम के साथ। +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. ## अपने Subgraph को The Graph में 3 आसान कदमों में अपग्रेड करें @@ -21,9 +21,9 @@ title: The Graph पर ट्रांसफर करें ### सबग्राफ बनाएँ Subgraph Studio में - [Subgraph Studio](https://thegraph.com/studio/) पर जाएँ और अपने वॉलेट को कनेक्ट करें। -- "एक सबग्राफ बनाएं" पर क्लिक करें। सबग्राफ का नाम टाइटल केस में रखनाrecommended है: "सबग्राफ नाम चेन नाम"। +- Click "Create a Subgraph". It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. ### Graph CLI स्थापित करें @@ -37,7 +37,7 @@ Using [npm](https://www.npmjs.com/): npm install -g @graphprotocol/graph-cli@latest ``` -इस कमांड का उपयोग करें और CLI का उपयोग करके Studio में एक subgraph बनाएँ: +Use the following command to create a Subgraph in Studio using the CLI: ```sh graph init --product subgraph-studio @@ -53,7 +53,7 @@ graph auth ## 2. अपने Subgraph को Studio पर डिप्लॉय करें -यदि आपके पास अपना सोर्स कोड है, तो आप इसे आसानी से Studio में डिप्लॉय कर सकते हैं। यदि आपके पास यह नहीं है, तो यहां एक त्वरित तरीका है अपनी subgraph को डिप्लॉय करने का। +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. The Graph CLI में, निम्नलिखित कमांड चलाएँ: @@ -62,7 +62,7 @@ graph deploy --ipfs-hash ``` -> नोट: प्रत्येक subgraph का एक IPFS हैश (Deployment ID) होता है, जो इस प्रकार दिखता है: "Qmasdfad...". बस इसे deploy करने के लिए इस IPFS हैश का उपयोग करें। आपको एक संस्करण दर्ज करने के लिए कहा जाएगा (जैसे, v0.0.1)। +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). ## 3. अपने Subgraph को The Graph Network पर प्रकाशित करें @@ -70,17 +70,17 @@ graph deploy --ipfs-hash ### अपने Subgraph को क्वेरी करें -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. 
#### उदाहरण -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: ![Query URL](/img/cryptopunks-screenshot-transfer.png) -इस subgraph का क्वेरी URL है: +The query URL for this Subgraph is: ```sh https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK @@ -96,9 +96,9 @@ https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgrap ### सबग्राफ की स्थिति की निगरानी करें -एक बार जब आप अपग्रेड करते हैं, तो आप Subgraph Studio(https://thegraph.com/studio/) में अपने सबग्राफ्स को एक्सेस और प्रबंधित कर सकते हैं और The Graph Explorer(https://thegraph.com/networks/) में सभी सबग्राफ्स को एक्सप्लोर कर सकते हैं। +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). ### Additional Resources -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- आप अपने subgraph के प्रदर्शन को बेहतर बनाने के लिए इसे अनुकूलित और कस्टमाइज़ करने के सभी तरीकों का पता लगाने के लिए, creating a subgraph here(/developing/creating-a-subgraph/) पर और पढ़ें। +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). From c4ca4bf2427f9d3cb00518a9ddc8478e11b17f7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:17 -0500 Subject: [PATCH 0735/1789] New translations transfer-to-the-graph.mdx (Swahili) --- .../cookbook/transfer-to-the-graph.mdx | 104 ++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/cookbook/transfer-to-the-graph.mdx diff --git a/website/src/pages/sw/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/sw/subgraphs/cookbook/transfer-to-the-graph.mdx new file mode 100644 index 000000000000..0ac02c8234a4 --- /dev/null +++ b/website/src/pages/sw/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -0,0 +1,104 @@ +--- +title: Tranfer to The Graph +--- + +Quickly upgrade your Subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). + +## Benefits of Switching to The Graph + +- Use the same Subgraph that your apps already use with zero-downtime migration. +- Increase reliability from a global network supported by 100+ Indexers. +- Receive lightning-fast support for Subgraphs 24/7, with an on-call engineering team. + +## Upgrade Your Subgraph to The Graph in 3 Easy Steps + +1. [Set Up Your Studio Environment](/subgraphs/cookbook/transfer-to-the-graph/#1-set-up-your-studio-environment) +2. [Deploy Your Subgraph to Studio](/subgraphs/cookbook/transfer-to-the-graph/#2-deploy-your-subgraph-to-studio) +3. [Publish to The Graph Network](/subgraphs/cookbook/transfer-to-the-graph/#publish-your-subgraph-to-the-graphs-decentralized-network) + +## 1. Set Up Your Studio Environment + +### Create a Subgraph in Subgraph Studio + +- Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. +- Click "Create a Subgraph". 
It is recommended to name the Subgraph in Title Case: "Subgraph Name Chain Name". + +> Note: After publishing, the Subgraph name will be editable but requires onchain action each time, so name it properly. + +### Install the Graph CLI⁠ + +You must have [Node.js](https://nodejs.org/) and a package manager of your choice (`npm` or `pnpm`) installed to use the Graph CLI. Check for the [most recent](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI version. + +On your local machine, run the following command: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli@latest +``` + +Use the following command to create a Subgraph in Studio using the CLI: + +```sh +graph init --product subgraph-studio +``` + +### Authenticate Your Subgraph + +In The Graph CLI, use the auth command seen in Subgraph Studio: + +```sh +graph auth +``` + +## 2. Deploy Your Subgraph to Studio + +If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your Subgraph. + +In The Graph CLI, run the following command: + +```sh +graph deploy --ipfs-hash + +``` + +> **Note:** Every Subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). + +## 3. Publish Your Subgraph to The Graph Network + +![publish button](/img/publish-sub-transfer.png) + +### Query Your Subgraph + +> To attract about 3 indexers to query your Subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. + +You can start [querying](/subgraphs/querying/introduction/) any Subgraph by sending a GraphQL query into the Subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. + +#### Example + +[CryptoPunks Ethereum Subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: + +![Query URL](/img/cryptopunks-screenshot-transfer.png) + +The query URL for this Subgraph is: + +```sh +https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK +``` + +Now, you simply need to fill in **your own API Key** to start sending GraphQL queries to this endpoint. + +### Getting your own API Key + +You can create API Keys in Subgraph Studio under the “API Keys” menu at the top of the page: + +![API keys](/img/Api-keys-screenshot.png) + +### Monitor Subgraph Status + +Once you upgrade, you can access and manage your Subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all Subgraphs in [The Graph Explorer](https://thegraph.com/networks/). + +### Additional Resources + +- To quickly create and publish a new Subgraph, check out the [Quick Start](/subgraphs/quick-start/). +- To explore all the ways you can optimize and customize your Subgraph for a better performance, read more about [creating a Subgraph here](/developing/creating-a-subgraph/). 
From 7c8b3272b3ab9ee23ee4ee804d90f31c06e28f60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:18 -0500 Subject: [PATCH 0736/1789] New translations assemblyscript-mappings.mdx (Romanian) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/ro/subgraphs/developing/creating/assemblyscript-mappings.mdx index 2ac894695fe1..cd81dc118f28 100644 --- a/website/src/pages/ro/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/ro/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ If no value is set for a field in the new entity with the same ID, the field wil ## Code Generation -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. This is done with @@ -80,7 +80,7 @@ This is done with graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. 
In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. From cccf00738d903efac11fdfc3bbb38348e37f3dd4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:19 -0500 Subject: [PATCH 0737/1789] New translations assemblyscript-mappings.mdx (French) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/fr/subgraphs/developing/creating/assemblyscript-mappings.mdx index 7bb87fa69ab6..bc0596c824e8 100644 --- a/website/src/pages/fr/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/fr/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ Les mappages prennent des données d'une source particulière et les transformen Pour chaque gestionnaire d'événements défini dans `subgraph.yaml` sous `mapping.eventHandlers`, créez une fonction exportée du même nom. Chaque gestionnaire doit accepter un seul paramètre appelé `event` avec un type correspondant au nom de l'événement traité. 
-Dans le subgraph d'exemple, `src/mapping.ts` contient des gestionnaires pour les événements `NewGravatar` et `UpdatedGravatar`: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ Si aucune valeur n'est définie pour un champ de la nouvelle entité avec le mê ## Génération de code -Afin de faciliter et de sécuriser le travail avec les contrats intelligents, les événements et les entités, la CLI Graph peut générer des types AssemblyScript à partir du schéma GraphQL du subgraph et des ABI de contrat inclus dans les sources de données. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. Cela se fait avec @@ -80,7 +80,7 @@ Cela se fait avec graph codegen [--output-dir ] [] ``` -mais dans la plupart des cas, les subgraphs sont déjà préconfigurés via `package.json` pour vous permettre d'exécuter simplement l'un des éléments suivants pour obtenir le même résultat : +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -Cela va générer une classe AssemblyScript pour chaque contrat intelligent dans les fichiers ABI mentionnés dans `subgraph.yaml`, vous permettant de lier ces contrats à des adresses spécifiques dans les mappagess et d'appeler des méthodes de contrat en lecture seule sur le bloc en cours de traitement. Il génère également une classe pour chaque événement de contrat afin de fournir un accès facile aux paramètres de l'événement, ainsi qu'au bloc et à la transaction d'où provient l'événement. Tous ces types sont écrits dans `//.ts`. Dans l'exemple du subgraph, ce serait `generated/Gravity/Gravity.ts`, permettant aux mappages d'importer ces types avec. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -En outre, une classe est générée pour chaque type d'entité dans le schéma GraphQL du subgraph. Ces classes fournissent un chargement sécurisé des entités, un accès en lecture et en écriture aux champs des entités ainsi qu'une méthode `save()` pour écrire les entités dans le store. Toutes les classes d'entités sont écrites dans le fichier `/schema.ts`, ce qui permet aux mappages de les importer avec la commande +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. 
All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** La génération de code doit être exécutée à nouveau après chaque modification du schéma GraphQL ou des ABIs incluses dans le manifeste. Elle doit également être effectuée au moins une fois avant de construire ou de déployer le subgraphs. +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -La génération de code ne vérifie pas votre code de mappage dans `src/mapping.ts`. Si vous souhaitez vérifier cela avant d'essayer de déployer votre subgraph sur Graph Explorer, vous pouvez exécuter `yarn build` et corriger les erreurs de syntaxe que le compilateur TypeScript pourrait trouver. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. From 6c413fc6e3b621424180bdcb3cf4cfa7a808f015 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:20 -0500 Subject: [PATCH 0738/1789] New translations assemblyscript-mappings.mdx (Spanish) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/es/subgraphs/developing/creating/assemblyscript-mappings.mdx index 792a6521f82d..520914f913f6 100644 --- a/website/src/pages/es/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/es/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ If no value is set for a field in the new entity with the same ID, the field wil ## Generación de código -Para que trabajar con contratos inteligentes, eventos y entidades sea fácil y seguro desde el punto de vista de los tipos, Graph CLI puede generar tipos AssemblyScript a partir del esquema GraphQL del subgrafo y de las ABIs de los contratos incluidas en las fuentes de datos. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. 
Esto se hace con @@ -80,7 +80,7 @@ Esto se hace con graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. 
From 69213bfb53ad3aec1c1053c319ef04341ddce5ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:21 -0500 Subject: [PATCH 0739/1789] New translations assemblyscript-mappings.mdx (Arabic) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/ar/subgraphs/developing/creating/assemblyscript-mappings.mdx index 2518d7620204..3062fe900657 100644 --- a/website/src/pages/ar/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/ar/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ If no value is set for a field in the new entity with the same ID, the field wil ## توليد الكود -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. This is done with @@ -80,7 +80,7 @@ This is done with graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. 
In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript 'import { Gravatar } from '../generated/schema ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. From 1e59a4283710f4217f5552627968b90653e3fa35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:22 -0500 Subject: [PATCH 0740/1789] New translations assemblyscript-mappings.mdx (Czech) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/cs/subgraphs/developing/creating/assemblyscript-mappings.mdx index fad0d6ebaa1a..00fb7cbcf275 100644 --- a/website/src/pages/cs/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/cs/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. 
-In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ Pokud není pro pole v nové entitě se stejným ID nastavena žádná hodnota, ## Generování kódu -Aby byla práce s inteligentními smlouvami, událostmi a entitami snadná a typově bezpečná, může Graf CLI generovat typy AssemblyScript ze schématu GraphQL podgrafu a ABI smluv obsažených ve zdrojích dat. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. To se provádí pomocí @@ -80,7 +80,7 @@ To se provádí pomocí graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. 
+> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. From 19d69da42adf3bee944aeeb3a0195c51b8c9b10b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:23 -0500 Subject: [PATCH 0741/1789] New translations assemblyscript-mappings.mdx (German) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/de/subgraphs/developing/creating/assemblyscript-mappings.mdx index 4354181a33df..f687e4c8d85e 100644 --- a/website/src/pages/de/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ If no value is set for a field in the new entity with the same ID, the field wil ## Code Generation -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. This is done with @@ -80,7 +80,7 @@ This is done with graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. 
It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. 
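The generated contract classes described in these hunks can also be bound to an address and queried with read-only calls against the block being processed. A minimal sketch, where `getGravatar` is a hypothetical view function used only for illustration:

```javascript
import { Gravity, NewGravatar } from '../generated/Gravity/Gravity'

export function handleNewGravatar(event: NewGravatar): void {
  // Bind the generated contract class to the address that emitted the event
  let gravity = Gravity.bind(event.address)

  // try_-prefixed wrappers return a result object instead of aborting the
  // handler when the underlying eth_call reverts. `getGravatar` is assumed,
  // not taken from the actual Gravity ABI.
  let result = gravity.try_getGravatar(event.params.owner)
  if (result.reverted) {
    return
  }
  // result.value holds the value returned by the call at this block
}
```

The `try_` variants are generally preferable inside mappings, since a reverting call through the plain method would abort the handler.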
From dc8d3b8459469b9cc4ea858462b99e4599653420 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:24 -0500 Subject: [PATCH 0742/1789] New translations assemblyscript-mappings.mdx (Italian) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/it/subgraphs/developing/creating/assemblyscript-mappings.mdx index 23271ae9c85c..8154b3d9555c 100644 --- a/website/src/pages/it/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/it/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ If no value is set for a field in the new entity with the same ID, the field wil ## Generazione del codice -Per rendere semplice e sicuro il lavoro con gli smart contract, gli eventi e le entità, la Graph CLI può generare tipi AssemblyScript dallo schema GraphQL del subgraph e dagli ABI dei contratti inclusi nelle data source. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. Questo viene fatto con @@ -80,7 +80,7 @@ Questo viene fatto con graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. 
In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. From 7123777dc346c1310b11a03120517a5daf53ea21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:25 -0500 Subject: [PATCH 0743/1789] New translations assemblyscript-mappings.mdx (Japanese) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/ja/subgraphs/developing/creating/assemblyscript-mappings.mdx index 50b664c86f3b..e46466a45c92 100644 --- a/website/src/pages/ja/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/ja/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. 
-In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ If no value is set for a field in the new entity with the same ID, the field wil ## コード生成 -スマートコントラクト、イベント、エンティティを簡単かつタイプセーフに扱うために、Graph CLIはサブグラフのGraphQLスキーマとデータソースに含まれるコントラクトABIからAssemblyScriptタイプを生成することができます。 +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. これを行うためには @@ -80,7 +80,7 @@ If no value is set for a field in the new entity with the same ID, the field wil graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. 
+> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. From 5835826b848fc3a687f372eea8bae433e1f6c9cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:26 -0500 Subject: [PATCH 0744/1789] New translations assemblyscript-mappings.mdx (Korean) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/ko/subgraphs/developing/creating/assemblyscript-mappings.mdx index 2ac894695fe1..cd81dc118f28 100644 --- a/website/src/pages/ko/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/ko/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ If no value is set for a field in the new entity with the same ID, the field wil ## Code Generation -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. This is done with @@ -80,7 +80,7 @@ This is done with graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. 
It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. 
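Beyond the parameters themselves, each generated event class carries the block and transaction the event originated from, as the hunks above note. A small sketch of reading that metadata through the standard `ethereum.Event` fields from `@graphprotocol/graph-ts`:

```javascript
import { log } from '@graphprotocol/graph-ts'
import { UpdatedGravatar } from '../generated/Gravity/Gravity'

export function handleUpdatedGravatar(event: UpdatedGravatar): void {
  // Event parameters are exposed as typed fields on event.params
  let id = event.params.id

  // Block and transaction metadata ship with every generated event class
  log.info('Gravatar {} updated in block {} (tx {})', [
    id.toString(),
    event.block.number.toString(),
    event.transaction.hash.toHexString(),
  ])
}
```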
From b0a59e59efa5c51cbe1589f6dd9af0788ddca2d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:27 -0500 Subject: [PATCH 0745/1789] New translations assemblyscript-mappings.mdx (Dutch) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/nl/subgraphs/developing/creating/assemblyscript-mappings.mdx index 2ac894695fe1..cd81dc118f28 100644 --- a/website/src/pages/nl/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/nl/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ If no value is set for a field in the new entity with the same ID, the field wil ## Code Generation -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. This is done with @@ -80,7 +80,7 @@ This is done with graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. 
In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. From 11765ce5345126e94d5fa0304d18d05a8cdc653d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:28 -0500 Subject: [PATCH 0746/1789] New translations assemblyscript-mappings.mdx (Polish) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/pl/subgraphs/developing/creating/assemblyscript-mappings.mdx index 2ac894695fe1..cd81dc118f28 100644 --- a/website/src/pages/pl/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/pl/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. 
-In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ If no value is set for a field in the new entity with the same ID, the field wil ## Code Generation -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. This is done with @@ -80,7 +80,7 @@ This is done with graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. 
+> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. From 87c08b5ba058aa22fca6468aeb5e3d21231c8ded Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:29 -0500 Subject: [PATCH 0747/1789] New translations assemblyscript-mappings.mdx (Portuguese) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/pt/subgraphs/developing/creating/assemblyscript-mappings.mdx index e7d972a9d0bf..68892d9994fa 100644 --- a/website/src/pages/pt/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/pt/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ Se nenhum valor for inserido para um campo na nova entidade com a mesma ID, o ca ## Geração de Código -Para tornar mais fácil e seguro a tipos o trabalho com contratos inteligentes, eventos e entidades, o Graph CLI pode gerar tipos de AssemblyScript a partir do schema GraphQL do subgraph e das ABIs de contratos incluídas nas fontes de dados. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. Isto é feito com @@ -80,7 +80,7 @@ Isto é feito com graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. 
It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. 
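The entity classes generated into `schema.ts` support type-safe loading as well as writing. Because constructing a new entity under an existing ID resets any field that is not explicitly set, update handlers usually load first and only fall back to creating. A sketch under the same Gravatar assumptions as above (string IDs, `displayName` and `imageUrl` fields):

```javascript
import { UpdatedGravatar } from '../generated/Gravity/Gravity'
import { Gravatar } from '../generated/schema'

export function handleUpdatedGravatar(event: UpdatedGravatar): void {
  let id = event.params.id.toHex()

  // Load the existing entity first so fields that are not re-set below keep
  // their current values; creating a fresh entity under the same ID would
  // reset them to defaults.
  let gravatar = Gravatar.load(id)
  if (gravatar == null) {
    gravatar = new Gravatar(id)
  }

  gravatar.displayName = event.params.displayName
  gravatar.imageUrl = event.params.imageUrl
  gravatar.save()
}
```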
From 59a615dff429599ce13dea496ccf345065b171f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:30 -0500 Subject: [PATCH 0748/1789] New translations assemblyscript-mappings.mdx (Russian) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/ru/subgraphs/developing/creating/assemblyscript-mappings.mdx index e4c398204f2e..6a74db44bfb3 100644 --- a/website/src/pages/ru/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/ru/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-too ## Генерация кода -Для упрощения и обеспечения безопасности типов при работе со смарт-контрактами, событиями и объектами Graph CLI может генерировать типы AssemblyScript на основе схемы GraphQL субграфа и ABI контрактов, включенных в источники данных. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. Это делается с помощью @@ -80,7 +80,7 @@ There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-too graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. 
In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. From 88e8b739fa0614c24c6ff8c06f2c7cc23ea6f7a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:31 -0500 Subject: [PATCH 0749/1789] New translations assemblyscript-mappings.mdx (Swedish) --- .../creating/assemblyscript-mappings.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/sv/subgraphs/developing/creating/assemblyscript-mappings.mdx index 259ae147af9f..3ed678be2a9a 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. 
-In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from "../generated/Gravity/Gravity"; @@ -72,7 +72,7 @@ If no value is set for a field in the new entity with the same ID, the field wil ## Kodgenerering -För att göra det enkelt och typsäkert att arbeta med smarta kontrakt, händelser och entiteter kan Graph CLI generera AssemblyScript-typer från subgrafens GraphQL-schema och kontrakts-ABIn som ingår i datakällorna. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. Detta görs med @@ -80,7 +80,7 @@ Detta görs med graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -99,15 +99,15 @@ import { // The events classes: NewGravatar, UpdatedGravatar, -} from '../generated/Gravity/Gravity' +} from "../generated/Gravity/Gravity"; ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. 
All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript -import { Gravatar } from '../generated/schema' +import { Gravatar } from "../generated/schema" ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. From 28be8a8fa3309bda95d057d435cd88b613582bd5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:32 -0500 Subject: [PATCH 0750/1789] New translations assemblyscript-mappings.mdx (Turkish) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/tr/subgraphs/developing/creating/assemblyscript-mappings.mdx index d3182334749c..7199a149244d 100644 --- a/website/src/pages/tr/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/tr/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ Eğer aynı ID'ye sahip yeni bir varlıkta bir alan için değer atanmamışsa, ## Kod Oluşturma -Akıllı sözleşmeler, olaylar ve varlıklarla çalışmayı kolay ve tip güvenli hale getirmek amacıyla Graph CLI, subgraph'ın GraphQL şemasından ve veri kaynaklarında bulunan sözleşme ABI'lerinden AssemblyScript türleri oluşturabilir. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. 
Bununla yapılır @@ -80,7 +80,7 @@ Bununla yapılır graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. 
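For reference alongside the `assemblyscript-mappings.mdx` patches above: a minimal sketch of the recommended `Bytes` id pattern that page describes, assuming the example Gravity Subgraph's generated types and a schema that declares `Gravatar.id` as `Bytes!` (the real example schema may use `ID!` instead):

```javascript
import { NewGravatar } from '../generated/Gravity/Gravity'
import { Gravatar } from '../generated/schema'

export function handleNewGravatar(event: NewGravatar): void {
  // Derive a collision-resistant Bytes id from the transaction hash and log index,
  // as the patched page recommends for Bytes-typed id fields
  let id = event.transaction.hash.concatI32(event.logIndex.toI32())
  let gravatar = new Gravatar(id)
  gravatar.owner = event.params.owner
  gravatar.displayName = event.params.displayName
  gravatar.imageUrl = event.params.imageUrl
  gravatar.save()
}
```
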
From 0236aa73b6d35e55793bb0f61101a04ac2dda8e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:33 -0500 Subject: [PATCH 0751/1789] New translations assemblyscript-mappings.mdx (Ukrainian) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/uk/subgraphs/developing/creating/assemblyscript-mappings.mdx index 2ac894695fe1..cd81dc118f28 100644 --- a/website/src/pages/uk/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/uk/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ If no value is set for a field in the new entity with the same ID, the field wil ## Code Generation -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. This is done with @@ -80,7 +80,7 @@ This is done with graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. 
In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. From fe798937dbdae49104169fe90bfca8d27e9329e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:34 -0500 Subject: [PATCH 0752/1789] New translations assemblyscript-mappings.mdx (Chinese Simplified) --- .../creating/assemblyscript-mappings.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/zh/subgraphs/developing/creating/assemblyscript-mappings.mdx index 88028e162e55..9fdd6e0d6a0f 100644 --- a/website/src/pages/zh/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/zh/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -2,7 +2,7 @@ title: Writing AssemblyScript Mappings --- -## 概述 +## Overview The mappings take data from a particular source and transform it into entities that are defined within your schema. Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. 
-In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,15 +72,15 @@ If no value is set for a field in the new entity with the same ID, the field wil ## 代码生成 -为了使与智能合约、事件和实体的代码编写工作变得简单且类型安全,Graph CLI 可以从子图的 GraphQL 模式和数据源中包含的合约 ABI 生成 AssemblyScript 类型。 +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. -这可以通过以下命令实现 +这可以通过以下命令实现: ```sh graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. 
It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. From 60c1ba4da4d97ae30315d6cadae4e3c842b0aa79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:35 -0500 Subject: [PATCH 0753/1789] New translations assemblyscript-mappings.mdx (Urdu (Pakistan)) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/ur/subgraphs/developing/creating/assemblyscript-mappings.mdx index 28f2936bb14f..64013ef5df38 100644 --- a/website/src/pages/ur/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/ur/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ If no value is set for a field in the new entity with the same ID, the field wil ## کوڈ تخلیق کرنا -سمارٹ کنٹریکٹس، ایوینٹس اور ہستیوں کے ساتھ کام کرنا آسان اور ٹائپ محفوظ بنانے کے لیے، گراف CLI ڈیٹا کے ذرائع میں شامل سب گراف کے GraphQL اسکیما اور کنٹریکٹ ABIs سے اسمبلی سکرپٹ کی قسمیں تیار کر سکتا ہے. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. اس کے ساتھ کیا جاتا ہے @@ -80,7 +80,7 @@ If no value is set for a field in the new entity with the same ID, the field wil graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. 
In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. From c2c6acd0c1d900cd48137b0a5491f446f2cc79ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:36 -0500 Subject: [PATCH 0754/1789] New translations assemblyscript-mappings.mdx (Vietnamese) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/vi/subgraphs/developing/creating/assemblyscript-mappings.mdx index 8a1d491a50fd..87b694b86828 100644 --- a/website/src/pages/vi/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/vi/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. 
-In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ If no value is set for a field in the new entity with the same ID, the field wil ## Tạo mã -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. This is done with @@ -80,7 +80,7 @@ This is done with graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. 
+> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. From ab0c75fb4384dd01987d65357aeab9ca746a73c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:37 -0500 Subject: [PATCH 0755/1789] New translations assemblyscript-mappings.mdx (Marathi) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/mr/subgraphs/developing/creating/assemblyscript-mappings.mdx index 682aec0ae2a5..e531b0f3d7c9 100644 --- a/website/src/pages/mr/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/mr/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ If no value is set for a field in the new entity with the same ID, the field wil ## कोड जनरेशन -स्मार्ट कॉन्ट्रॅक्ट्स, इव्हेंट्स आणि संस्थांसोबत काम करणे सोपे आणि टाइप-सुरक्षित करण्यासाठी, ग्राफ CLI सबग्राफच्या GraphQL स्कीमा आणि डेटा स्रोतांमध्ये समाविष्ट केलेल्या कॉन्ट्रॅक्ट ABIs मधून असेंबलीस्क्रिप्ट प्रकार व्युत्पन्न करू शकतो. +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. यासह केले जाते @@ -80,7 +80,7 @@ If no value is set for a field in the new entity with the same ID, the field wil graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. 
It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. 
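For reference alongside the code-generation sections patched above: a hedged sketch of binding the generated `Gravity` class inside a handler to make a read-only call against the block being processed. `try_gravatarOwner` is a placeholder name, not a method guaranteed by the Gravity ABI — substitute whichever view function your contract actually exposes:

```javascript
import { Gravity, UpdatedGravatar } from '../generated/Gravity/Gravity'
import { Gravatar } from '../generated/schema'

export function handleUpdatedGravatar(event: UpdatedGravatar): void {
  // Bind the generated contract class to the address that emitted the event
  let gravity = Gravity.bind(event.address)

  // Load-or-create, mirroring the handlers shown in the patched pages
  let gravatar = Gravatar.load(event.params.id)
  if (gravatar == null) {
    gravatar = new Gravatar(event.params.id)
  }

  // `try_`-prefixed call variants report reverts instead of aborting the mapping;
  // `try_gravatarOwner` stands in for a real view function from your ABI
  let owner = gravity.try_gravatarOwner(event.params.id)
  if (!owner.reverted) {
    gravatar.owner = owner.value
  }

  gravatar.save()
}
```
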
From 72246498b8af6e5d987c05d43248fa0858eb47ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:38 -0500 Subject: [PATCH 0756/1789] New translations assemblyscript-mappings.mdx (Hindi) --- .../creating/assemblyscript-mappings.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/hi/subgraphs/developing/creating/assemblyscript-mappings.mdx index 38441c623127..71d23fe6bec0 100644 --- a/website/src/pages/hi/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/hi/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -10,7 +10,7 @@ The mappings take data from a particular source and transform it into entities t For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -72,7 +72,7 @@ There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-too ## कोड जनरेशन -स्मार्ट कॉन्ट्रैक्ट्स, इवेंट्स और एंटिटीज के साथ काम करना आसान और टाइप-सेफ बनाने के लिए, ग्राफ सीएलआई सबग्राफ के ग्राफक्यूएल स्कीमा और डेटा स्रोतों में शामिल कॉन्ट्रैक्ट एबीआई से असेंबलीस्क्रिप्ट प्रकार उत्पन्न कर सकता है। +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. इसके साथ किया जाता है @@ -80,7 +80,7 @@ There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-too graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: ```sh # Yarn @@ -90,7 +90,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. 
In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. From 8c13363bee6b57049d702376cdc24e23331cef02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:39 -0500 Subject: [PATCH 0757/1789] New translations assemblyscript-mappings.mdx (Swahili) --- .../creating/assemblyscript-mappings.mdx | 113 ++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/developing/creating/assemblyscript-mappings.mdx diff --git a/website/src/pages/sw/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/sw/subgraphs/developing/creating/assemblyscript-mappings.mdx new file mode 100644 index 000000000000..cd81dc118f28 --- /dev/null +++ b/website/src/pages/sw/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -0,0 +1,113 @@ +--- +title: Writing AssemblyScript Mappings +--- + +## Overview + +The mappings take data from a particular source and transform it into entities that are defined within your schema. Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. + +## Writing Mappings + +For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. 
+ +In the example Subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: + +```javascript +import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' +import { Gravatar } from '../generated/schema' + +export function handleNewGravatar(event: NewGravatar): void { + let gravatar = new Gravatar(event.params.id) + gravatar.owner = event.params.owner + gravatar.displayName = event.params.displayName + gravatar.imageUrl = event.params.imageUrl + gravatar.save() +} + +export function handleUpdatedGravatar(event: UpdatedGravatar): void { + let id = event.params.id + let gravatar = Gravatar.load(id) + if (gravatar == null) { + gravatar = new Gravatar(id) + } + gravatar.owner = event.params.owner + gravatar.displayName = event.params.displayName + gravatar.imageUrl = event.params.imageUrl + gravatar.save() +} +``` + +The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id.toHex())`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id.toHex()`. + +The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. + +### Recommended IDs for Creating New Entities + +It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. + +- `transfer.id = event.transaction.hash` + +- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` + +- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like + +```typescript +let dayID = event.block.timestamp.toI32() / 86400 +let id = Bytes.fromI32(dayID) +``` + +- Convert constant addresses to `Bytes`. + +`const id = Bytes.fromHexString('0xdead...beef')` + +There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. + +### Handling of entities with identical IDs + +When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. + +If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. + +If no value is set for a field in the new entity with the same ID, the field will result in null as well. + +## Code Generation + +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the Subgraph's GraphQL schema and the contract ABIs included in the data sources. 
+ +This is done with + +```sh +graph codegen [--output-dir ] [] +``` + +but in most cases, Subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: + +```sh +# Yarn +yarn codegen + +# NPM +npm run codegen +``` + +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example Subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. + +```javascript +import { + // The contract class: + Gravity, + // The events classes: + NewGravatar, + UpdatedGravatar, +} from '../generated/Gravity/Gravity' +``` + +In addition to this, one class is generated for each entity type in the Subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with + +```javascript +import { Gravatar } from '../generated/schema' +``` + +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the Subgraph. + +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your Subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. From 1323ea4a6f5ac4a22b4e39cbadd95677cbefce5a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:40 -0500 Subject: [PATCH 0758/1789] New translations install-the-cli.mdx (Romanian) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/ro/subgraphs/developing/creating/install-the-cli.mdx index f98ef589aaef..ee168286548b 100644 --- a/website/src/pages/ro/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/ro/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: Install the Graph CLI --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). +> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
## Overview -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## Getting Started @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## Creează un Subgraf ### From an Existing Contract -The following command creates a subgraph that indexes all events of an existing contract: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - If any of the optional arguments are missing, it guides you through an interactive form. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### From an Example Subgraph -The following command initializes a new project from an example subgraph: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. -- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. +- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. 
A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: - If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Releases - -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From 6094acfc8a457776596d77f0ee6aa6d86fd38144 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:41 -0500 Subject: [PATCH 0759/1789] New translations install-the-cli.mdx (French) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/fr/subgraphs/developing/creating/install-the-cli.mdx index 0376a713f058..fc205ece8f5e 100644 --- a/website/src/pages/fr/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/fr/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: Installation du Graph CLI --- -> Pour utiliser votre subgraph sur le réseau décentralisé de The Graph, vous devrez [créer une clé API](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) dans [Subgraph Studio](https://thegraph.com/studio/apikeys/). Il est recommandé d'ajouter un signal à votre subgraph avec au moins 3 000 GRT pour attirer 2 à 3 Indexeurs. Pour en savoir plus sur la signalisation, consultez [curation](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## Aperçu -[Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) est une interface de ligne de commande qui facilite les commandes des développeurs pour The Graph. Il traite un [manifeste de subgraph](/subgraphs/developing/creating/subgraph-manifest/) et compile les [mappages](/subgraphs/developing/creating/assemblyscript-mappings/) pour créer les fichiers dont vous aurez besoin pour déployer le subgraph sur [Subgraph Studio](https://thegraph.com/studio/) et le réseau. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## Introduction @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -La commande `graph init` peut être utilisée pour configurer un nouveau projet de subgraph, soit à partir d'un contrat existant, soit à partir d'un exemple de subgraph. Si vous avez déjà déployé un contrat intelligent sur votre réseau préféré, vous pouvez démarrer un nouveau subgraph à partir de ce contrat pour commencer. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## Créer un subgraph ### À partir d'un contrat existant -La commande suivante crée un subgraph qui indexe tous les événements d'un contrat existant : +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - Si certains arguments optionnels manquent, il vous guide à travers un formulaire interactif. -- Le `` est l'ID de votre subgraph dans [Subgraph Studio](https://thegraph.com/studio/). Il se trouve sur la page de détails de votre subgraph. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### À partir d'un exemple de subgraph -La commande suivante initialise un nouveau projet à partir d'un exemple de subgraph : +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- Le [subgraph d'exemple](https://github.com/graphprotocol/example-subgraph) est basé sur le contrat Gravity de Dani Grant, qui gère les avatars des utilisateurs et émet des événements `NewGravatar` ou `UpdateGravatar` chaque fois que des avatars sont créés ou mis à jour. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. 
-- Le subgraph gère ces événements en écrivant des entités `Gravatar` dans le store de Graph Node et en veillant à ce qu'elles soient mises à jour en fonction des événements. +- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Ajouter de nouvelles `sources de données` à un subgraph existant -Les `dataSources` sont des composants clés des subgraphs. Ils définissent les sources de données que le subgraphs indexe et traite. Une `dataSource` spécifie quel smart contract doit être écouté, quels événements doivent être traités et comment les traiter. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Les versions récentes de Graph CLI permettent d'ajouter de nouvelles `dataSources` à un subgraph existant grâce à la commande `graph add` : +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ La commande `graph add` récupère l'ABI depuis Etherscan (à moins qu'un chemin Le(s) fichier(s) ABI doivent correspondre à votre(vos) contrat(s). Il existe plusieurs façons d'obtenir des fichiers ABI : - Si vous construisez votre propre projet, vous aurez probablement accès à vos ABI les plus récents. -- Si vous construisez un subgraph pour un projet public, vous pouvez télécharger ce projet sur votre ordinateur et obtenir l'ABI en utilisant [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) ou en utilisant `solc` pour compiler. -- Vous pouvez également trouver l'ABI sur [Etherscan](https://etherscan.io/), mais ce n'est pas toujours fiable, car l'ABI qui y est téléchargé peut être obsolète. Assurez-vous d'avoir le bon ABI, sinon l'exécution de votre subgraph échouera. - -## Versions disponibles de SpecVersion - -| Version | Notes de version | -| :-: | --- | -| 1.2.0 | Ajout de la prise en charge du [filtrage des arguments indexés](/#indexed-argument-filters--topic-filters) et de la déclaration `eth_call` | -| 1.1.0 | Prend en charge [Timeseries & Aggregations](#timeseries-and-aggregations). Ajout de la prise en charge du type `Int8` pour `id`. | -| 1.0.0 | Prend en charge la fonctionnalité [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) pour élaguer les subgraphs | -| 0.0.9 | Prend en charge la fonctionnalité `endBlock` | -| 0.0.8 | Ajout de la prise en charge des [gestionnaires de blocs](/developing/creating-a-subgraph/#polling-filter) et des [gestionnaires d'initialisation](/developing/creating-a-subgraph/#once-filter) d'interrogation. | -| 0.0.7 | Ajout de la prise en charge des [fichiers sources de données](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Prend en charge la variante de calcul rapide de la [Preuve d'indexation](/indexing/overview/#what-is-a-proof-of-indexing-poi). | -| 0.0.5 | Ajout de la prise en charge des gestionnaires d'événement ayant accès aux reçus de transactions. | -| 0.0.4 | Ajout de la prise en charge du management des fonctionnalités de subgraph. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. 
From d229e21bfc35ae42812c7d6c7f383d4a2db9e9c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:42 -0500 Subject: [PATCH 0760/1789] New translations install-the-cli.mdx (Spanish) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/es/subgraphs/developing/creating/install-the-cli.mdx index 5a0e73fd0bbd..d968a59b17ff 100644 --- a/website/src/pages/es/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/es/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: Instalar The Graph CLI --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). +> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## Descripción -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## Empezando @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. 
## Crear un Subgrafo ### Desde un Contrato Existente -The following command creates a subgraph that indexes all events of an existing contract: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - If any of the optional arguments are missing, it guides you through an interactive form. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### De un Subgrafo de Ejemplo -The following command initializes a new project from an example subgraph: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. -- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. +- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is Los archivos ABI deben coincidir con tu(s) contrato(s). Hay varias formas de obtener archivos ABI: - Si estás construyendo tu propio proyecto, es probable que tengas acceso a tus ABIs más actuales. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Releases - -| Version | Notas del lanzamiento | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From 4d92a430b001a8e7d6ba584f2926b2a31d2c34e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:43 -0500 Subject: [PATCH 0761/1789] New translations install-the-cli.mdx (Arabic) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/ar/subgraphs/developing/creating/install-the-cli.mdx index b55d24367e50..81469bc1837b 100644 --- a/website/src/pages/ar/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/ar/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: قم بتثبيت Graph CLI --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## نظره عامة -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## Getting Started @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## إنشاء الـ Subgraph ### من عقد موجود -The following command creates a subgraph that indexes all events of an existing contract: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - If any of the optional arguments are missing, it guides you through an interactive form. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### من مثال Subgraph -The following command initializes a new project from an example subgraph: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. -- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. 
+- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is يجب أن تتطابق ملف (ملفات) ABI مع العقد (العقود) الخاصة بك. هناك عدة طرق للحصول على ملفات ABI: - إذا كنت تقوم ببناء مشروعك الخاص ، فمن المحتمل أن تتمكن من الوصول إلى أحدث ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Releases - -| الاصدار | ملاحظات الإصدار | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From 979f164c1159340c44936e14c3968a4d7bfac09f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:45 -0500 Subject: [PATCH 0762/1789] New translations install-the-cli.mdx (Czech) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/cs/subgraphs/developing/creating/install-the-cli.mdx index dbeac0c137a5..536b416c9465 100644 --- a/website/src/pages/cs/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/cs/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: Instalace Graf CLI --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## Přehled -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## Začínáme @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## Vytvoření podgrafu ### Ze stávající smlouvy -The following command creates a subgraph that indexes all events of an existing contract: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - If any of the optional arguments are missing, it guides you through an interactive form. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### Z příkladu podgrafu -The following command initializes a new project from an example subgraph: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. -- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. 
+- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is Soubor(y) ABI se musí shodovat s vaší smlouvou. Soubory ABI lze získat několika způsoby: - Pokud vytváříte vlastní projekt, budete mít pravděpodobně přístup k nejaktuálnějším ABI. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Releases - -| Verze | Poznámky vydání | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From 52e62703ef05ee97972f17a1a511d614fb492cfd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:46 -0500 Subject: [PATCH 0763/1789] New translations install-the-cli.mdx (German) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/de/subgraphs/developing/creating/install-the-cli.mdx index f9d419ffe1ce..925885bc3ade 100644 --- a/website/src/pages/de/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: Install the Graph CLI --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## Überblick -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## Erste Schritte @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## Create a Subgraph ### From an Existing Contract -The following command creates a subgraph that indexes all events of an existing contract: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - If any of the optional arguments are missing, it guides you through an interactive form. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### From an Example Subgraph -The following command initializes a new project from an example subgraph: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. -- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. 
+- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: - If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Releases - -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From 2df66113398eae1fed4bd0bac931cb3eafcf0b7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:47 -0500 Subject: [PATCH 0764/1789] New translations install-the-cli.mdx (Italian) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/it/subgraphs/developing/creating/install-the-cli.mdx index 4f4afcee006a..20770b2e37b7 100644 --- a/website/src/pages/it/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/it/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: Installare the Graph CLI --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## Panoramica -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## Per cominciare @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## Create a Subgraph ### Da un contratto esistente -The following command creates a subgraph that indexes all events of an existing contract: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - If any of the optional arguments are missing, it guides you through an interactive form. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### Da un subgraph di esempio -The following command initializes a new project from an example subgraph: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. 
-- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. +- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is I file ABI devono corrispondere al vostro contratto. Esistono diversi modi per ottenere i file ABI: - Se state costruendo il vostro progetto, probabilmente avrete accesso alle ABI più recenti. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Releases - -| Versione | Note di rilascio | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From 022a592696414933448f37652e232cb8a4abdc25 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:48 -0500 Subject: [PATCH 0765/1789] New translations install-the-cli.mdx (Japanese) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/ja/subgraphs/developing/creating/install-the-cli.mdx index 397b011cbdd3..3352df16b841 100644 --- a/website/src/pages/ja/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/ja/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: Graph CLI のインストール --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## 概要 -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## はじめに @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## サブグラフの作成 ### 既存のコントラクトから -The following command creates a subgraph that indexes all events of an existing contract: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - If any of the optional arguments are missing, it guides you through an interactive form. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### サブグラフの例から -The following command initializes a new project from an example subgraph: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. -- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. 
+- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is ABI ファイルは、契約内容と一致している必要があります。ABI ファイルを入手するにはいくつかの方法があります: - 自分のプロジェクトを構築している場合は、最新の ABI にアクセスできる可能性があります。 -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Releases - -| バージョン | リリースノート | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From 005ee4c8c4aed7e341d2f6c9d28a9ba11db00cf3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:49 -0500 Subject: [PATCH 0766/1789] New translations install-the-cli.mdx (Korean) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/ko/subgraphs/developing/creating/install-the-cli.mdx index 674cc5bc22d2..c9d6966ef5fe 100644 --- a/website/src/pages/ko/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/ko/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: Install the Graph CLI --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## Overview -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## Getting Started @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## Create a Subgraph ### From an Existing Contract -The following command creates a subgraph that indexes all events of an existing contract: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - If any of the optional arguments are missing, it guides you through an interactive form. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### From an Example Subgraph -The following command initializes a new project from an example subgraph: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. -- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. 
+- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: - If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Releases - -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From 20ca73ebaa566913065ef1efb6201866778aadb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:49 -0500 Subject: [PATCH 0767/1789] New translations install-the-cli.mdx (Dutch) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/nl/subgraphs/developing/creating/install-the-cli.mdx index 8bf0b4dfca9f..004d0f94c99e 100644 --- a/website/src/pages/nl/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/nl/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: Install the Graph CLI --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## Overview -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## Getting Started @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## Creëer een Subgraph ### From an Existing Contract -The following command creates a subgraph that indexes all events of an existing contract: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - If any of the optional arguments are missing, it guides you through an interactive form. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### From an Example Subgraph -The following command initializes a new project from an example subgraph: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. 
-- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. +- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: - If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Releases - -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From 4db457eb5a51ae4a9e959e8be8bccd382dc86dec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:50 -0500 Subject: [PATCH 0768/1789] New translations install-the-cli.mdx (Polish) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/pl/subgraphs/developing/creating/install-the-cli.mdx index d4509815a845..112f0952a1e8 100644 --- a/website/src/pages/pl/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/pl/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: Install the Graph CLI --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## Overview -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## Getting Started @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## Jak stworzyć subgraf ### From an Existing Contract -The following command creates a subgraph that indexes all events of an existing contract: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - If any of the optional arguments are missing, it guides you through an interactive form. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### From an Example Subgraph -The following command initializes a new project from an example subgraph: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. 
-- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. +- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: - If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Releases - -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From 5c43992e1a61baa4bd37e46587b9db391200d61f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:51 -0500 Subject: [PATCH 0769/1789] New translations install-the-cli.mdx (Portuguese) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/pt/subgraphs/developing/creating/install-the-cli.mdx index ca436b6eef1b..0417048c351c 100644 --- a/website/src/pages/pt/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/pt/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: Como instalar o Graph CLI --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## Visão geral -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## Como Começar @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## Crie um Subgraph ### De um Contrato Existente -The following command creates a subgraph that indexes all events of an existing contract: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - If any of the optional arguments are missing, it guides you through an interactive form. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### De um Exemplo de Subgraph -The following command initializes a new project from an example subgraph: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. -- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. 
+- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is Os arquivos da ABI devem combinar com o(s) seu(s) contrato(s). Há algumas maneiras de obter estes arquivos: - Caso construa o seu próprio projeto, provavelmente terá acesso às suas ABIs mais recentes. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Releases - -| Versão | Notas de atualização | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Adicionado apoio a handlers de eventos com acesso a recibos de transação. | -| 0.0.4 | Adicionado apoio à gestão de recursos de subgraph. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From f4dbec23e79e88b70805ca79edb5d18f91217508 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:52 -0500 Subject: [PATCH 0770/1789] New translations install-the-cli.mdx (Russian) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/ru/subgraphs/developing/creating/install-the-cli.mdx index b48104c2ff0d..0208397aeb4d 100644 --- a/website/src/pages/ru/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/ru/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: Установка Graph CLI --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## Обзор -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## Начало работы @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## Создайте субграф ### Из существующего контракта -Следующая команда создает субграф, индексирующий все события существующего контракта: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - Если какой-либо из необязательных аргументов отсутствует, Вам будет предложено воспользоваться интерактивной формой. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### Из примера подграфа -Следующая команда инициализирует новый проект на примере субграфа: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. 
-- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. +- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is Файл(ы) ABI должен(ы) соответствовать Вашему контракту (контрактам). Существует несколько способов получения файлов ABI: - Если Вы создаете свой собственный проект, у Вас, скорее всего, будет доступ к наиболее актуальным ABIS. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## Релизы SpecVersion - -| Версия | Примечания к релизу | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Добавлена поддержка обработчиков событий, имеющих доступ к чекам транзакций. | -| 0.0.4 | Добавлена ​​поддержка управления функциями субграфа. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From 46e4e8d46a395afb51cbf7c92bc94516b3ead591 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:53 -0500 Subject: [PATCH 0771/1789] New translations install-the-cli.mdx (Swedish) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/sv/subgraphs/developing/creating/install-the-cli.mdx index 8905ec3abf61..21e3401cd8e9 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: Installera Graph CLI --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## Översikt -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## Komma igång @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## Skapa en Subgraf ### Från ett befintligt avtal -The following command creates a subgraph that indexes all events of an existing contract: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - If any of the optional arguments are missing, it guides you through an interactive form. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### Från ett exempel på en undergraf -The following command initializes a new project from an example subgraph: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. 
-- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. +- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is ABI-filerna måste matcha ditt/dina kontrakt. Det finns några olika sätt att få ABI-filer: - Om du bygger ditt eget projekt har du förmodligen tillgång till dina senaste ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Releases - -| Version | Versionsanteckningar | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From 1a9b2f7af501ca12831d7192438f1d7bd76997e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:54 -0500 Subject: [PATCH 0772/1789] New translations install-the-cli.mdx (Turkish) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/tr/subgraphs/developing/creating/install-the-cli.mdx index 5b0633a6c1bf..08c282f651d6 100644 --- a/website/src/pages/tr/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/tr/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: Graph CLI'ı Yükleyin --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## Genel Bakış -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## Buradan Başlayın @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## Subgraph Oluştur ### Mevcut Bir Sözleşmeden -Aşağıdaki komut, mevcut bir sözleşmenin tüm olaylarını endeksleyen bir subgraph oluşturur: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - İsteğe bağlı argümanlar eksikse, komut sizi bir etkileşimli forma yönlendirir. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### Örnek Bir Subgraph'ten -Aşağıdaki komut, örnek bir subgraph'ten yeni bir proje ilklendirir: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. -- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. 
+- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is ABI dosya(lar)ı sözleşme(ler) inizle uygun olmalıdır. ABI dosyalarını edinmek için birkaç yol vardır: - Kendi projenizi oluşturuyorsanız, muhtemelen en güncel ABI'lerinize erişiminiz olacaktır. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Sürümleri - -| Sürüm | Sürüm Notları | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | İşleyicilerin işlem makbuzlarına erişim desteği eklendi. | -| 0.0.4 | Subgraph özelliklerini yönetme desteği eklendi. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From 59eb7b9603ef35979323ca956ea7f50dcb58af73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:55 -0500 Subject: [PATCH 0773/1789] New translations install-the-cli.mdx (Ukrainian) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/uk/subgraphs/developing/creating/install-the-cli.mdx index 9f03c3a6c84a..cac462d8e960 100644 --- a/website/src/pages/uk/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/uk/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: Install the Graph CLI --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## Overview -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## Getting Started @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## Створення субграфа ### From an Existing Contract -The following command creates a subgraph that indexes all events of an existing contract: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - If any of the optional arguments are missing, it guides you through an interactive form. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### From an Example Subgraph -The following command initializes a new project from an example subgraph: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. -- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. 
+- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: - If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Releases - -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From 6ee72e710f56a0008ad9ed5e8312851dcc24515a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:56 -0500 Subject: [PATCH 0774/1789] New translations install-the-cli.mdx (Chinese Simplified) --- .../developing/creating/install-the-cli.mdx | 40 ++++++------------- 1 file changed, 13 insertions(+), 27 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/zh/subgraphs/developing/creating/install-the-cli.mdx index 521b9a21d1a6..af0385b99bce 100644 --- a/website/src/pages/zh/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/zh/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: 安装 Graph CLI --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). -## 概述 +## Overview -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## 开始 @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## 创建子图 ### 基于现有合约 -The following command creates a subgraph that indexes all events of an existing contract: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - If any of the optional arguments are missing, it guides you through an interactive form. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### 基于子图示例 -The following command initializes a new project from an example subgraph: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. -- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. 
+- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is ABI 文件必须与您的合约相匹配。 获取 ABI 文件的方法有以下几种: - 如果您正在构建自己的项目,您可以获取最新的 ABI。 -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Releases - -| 版本 | Release 说明 | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From d3a4da9cfc1a10482fc8c55e27bdb7e284fb372c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:57 -0500 Subject: [PATCH 0775/1789] New translations install-the-cli.mdx (Urdu (Pakistan)) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/ur/subgraphs/developing/creating/install-the-cli.mdx index 14ead227c976..fc2b95935dc4 100644 --- a/website/src/pages/ur/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/ur/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: گراف CLI انسٹال کریں --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## جائزہ -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## شروع ہوا چاہتا ہے @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## سب گراف بنائیں ### ایک موجودہ کنٹریکٹ سے -The following command creates a subgraph that indexes all events of an existing contract: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - If any of the optional arguments are missing, it guides you through an interactive form. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### ایک مثالی سب گراف سے -The following command initializes a new project from an example subgraph: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. -- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. 
+- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is ABI فائل (فائلیں) آپ کے کنٹریکٹ (کنٹریکٹس) سے مماثل ہونی چاہیں. ABI کی فائلیں حاصل کرنے کے چند طریقے ہیں: - اگر آپ اپنا پراجیکٹ خود بنا رہے ہیں، تو ممکنہ طور پر آپ کو اپنے حالیہ ABIs تک رسائی حاصل ہوگی. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Releases - -| ورزن | جاری کردہ نوٹس | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From cb154fa7c7620cc646ba5bf4bccf83aa3fafca10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:58 -0500 Subject: [PATCH 0776/1789] New translations install-the-cli.mdx (Vietnamese) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/vi/subgraphs/developing/creating/install-the-cli.mdx index ab11aa3306cb..f9573b198f89 100644 --- a/website/src/pages/vi/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/vi/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: Cài đặt Graph CLI --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## Tổng quan -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## Getting Started @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## Tạo một Subgraph ### From an Existing Contract -The following command creates a subgraph that indexes all events of an existing contract: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - If any of the optional arguments are missing, it guides you through an interactive form. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### From an Example Subgraph -The following command initializes a new project from an example subgraph: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. -- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. 
+- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is (Các) tệp ABI phải khớp với (các) hợp đồng của bạn. Có một số cách để lấy tệp ABI: - Nếu bạn đang xây dựng dự án của riêng mình, bạn có thể sẽ có quyền truy cập vào các ABI mới nhất của mình. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Releases - -| Phiên bản | Ghi chú phát hành | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From 3c0404fb7b19f48a6ca1fb3509e53ba090128b8c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:21:59 -0500 Subject: [PATCH 0777/1789] New translations install-the-cli.mdx (Marathi) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/mr/subgraphs/developing/creating/install-the-cli.mdx index 51dfb940edcb..c6892188ddfa 100644 --- a/website/src/pages/mr/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/mr/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: Install the Graph CLI --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## सविश्लेषण -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## प्रारंभ करणे @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## सबग्राफ तयार करा ### विद्यमान करारातून -The following command creates a subgraph that indexes all events of an existing contract: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - If any of the optional arguments are missing, it guides you through an interactive form. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### सबग्राफच्या उदाहरणावरून -The following command initializes a new project from an example subgraph: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. -- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. 
+- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: - If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## SpecVersion Releases - -| आवृत्ती | रिलीझ नोट्स | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From e7d7f3dc492bd3d3ea738dc3e61a911ef27df1c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:00 -0500 Subject: [PATCH 0778/1789] New translations install-the-cli.mdx (Hindi) --- .../developing/creating/install-the-cli.mdx | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/hi/subgraphs/developing/creating/install-the-cli.mdx index 84d3b139b130..93aa489c266f 100644 --- a/website/src/pages/hi/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/hi/subgraphs/developing/creating/install-the-cli.mdx @@ -2,11 +2,11 @@ title: . ग्राफ़ सीएलआई इनस्टॉल करें --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). 
+> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). ## अवलोकन -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. ## शुरू करना @@ -28,13 +28,13 @@ npm install -g @graphprotocol/graph-cli@latest yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. ## एक सबग्राफ बनाएं ### एक मौजूदा कॉन्ट्रैक्ट से -यह कमांड एक subgraph बनाता है जो एक मौजूदा कॉन्ट्रैक्ट के सभी इवेंट्स को इंडेक्स करता है: +The following command creates a Subgraph that indexes all events of an existing contract: ```sh graph init \ @@ -51,25 +51,25 @@ graph init \ - यदि कोई वैकल्पिक तर्क गायब है, तो यह आपको एक इंटरैक्टिव फॉर्म के माध्यम से मार्गदर्शन करता है। -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. ### एक उदाहरण सबग्राफ से -निम्नलिखित कमांड एक उदाहरण subgraph से एक नया प्रोजेक्ट प्रारंभ करता है: +The following command initializes a new project from an example Subgraph: ```sh graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. -- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. 
+- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. ### Add New `dataSources` to an Existing Subgraph -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: ```sh graph add
[] @@ -101,19 +101,5 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is एबीआई फाइल(फाइलों) को आपके अनुबंध(ओं) से मेल खाना चाहिए। ABI फ़ाइलें प्राप्त करने के कुछ तरीके हैं: - यदि आप अपना खुद का प्रोजेक्ट बना रहे हैं, तो आपके पास अपने सबसे मौजूदा एबीआई तक पहुंच होने की संभावना है। -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## स्पेकवर्जन रिलीज़ - -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | घटना हैंडलरों को लेनदेन रसीदों तक पहुंच प्रदान करने के लिए समर्थन जोड़ा गया है। | -| 0.0.4 | घटना हैंडलरों को लेनदेन रसीदों तक पहुंच प्रदान करने के लिए समर्थन जोड़ा गया है। | +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From 6507cfa072d01bee08d50c3f0667583eeacddd74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:01 -0500 Subject: [PATCH 0779/1789] New translations install-the-cli.mdx (Swahili) --- .../developing/creating/install-the-cli.mdx | 105 ++++++++++++++++++ 1 file changed, 105 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/developing/creating/install-the-cli.mdx diff --git a/website/src/pages/sw/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/sw/subgraphs/developing/creating/install-the-cli.mdx new file mode 100644 index 000000000000..c9d6966ef5fe --- /dev/null +++ b/website/src/pages/sw/subgraphs/developing/creating/install-the-cli.mdx @@ -0,0 +1,105 @@ +--- +title: Install the Graph CLI +--- + +> In order to use your Subgraph on The Graph's decentralized network, you will need to [create an API key](/resources/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your Subgraph with at least 3,000 GRT to attract 2-3 Indexers. 
To learn more about signaling, check out [curating](/resources/roles/curating/). + +## Overview + +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [Subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the Subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. + +## Getting Started + +### Install the Graph CLI + +The Graph CLI is written in TypeScript, and you must have `node` and either `npm` or `yarn` installed to use it. Check for the [most recent](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI version. + +On your local machine, run one of the following commands: + +#### Using [npm](https://www.npmjs.com/) + +```bash +npm install -g @graphprotocol/graph-cli@latest +``` + +#### Using [yarn](https://yarnpkg.com/) + +```bash +yarn global add @graphprotocol/graph-cli +``` + +The `graph init` command can be used to set up a new Subgraph project, either from an existing contract or from an example Subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new Subgraph from that contract to get started. + +## Create a Subgraph + +### From an Existing Contract + +The following command creates a Subgraph that indexes all events of an existing contract: + +```sh +graph init \ + --product subgraph-studio + --from-contract \ + [--network ] \ + [--abi ] \ + [] +``` + +- The command tries to retrieve the contract ABI from Etherscan. + + - The Graph CLI relies on a public RPC endpoint. While occasional failures are expected, retries typically resolve this issue. If failures persist, consider using a local ABI. + +- If any of the optional arguments are missing, it guides you through an interactive form. + +- The `` is the ID of your Subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your Subgraph details page. + +### From an Example Subgraph + +The following command initializes a new project from an example Subgraph: + +```sh +graph init --from-example=example-subgraph +``` + +- The [example Subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. + +- The Subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. + +### Add New `dataSources` to an Existing Subgraph + +`dataSources` are key components of Subgraphs. They define the sources of data that the Subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. + +Recent versions of the Graph CLI supports adding new `dataSources` to an existing Subgraph through the `graph add` command: + +```sh +graph add
[] + +Options: + + --abi Path to the contract ABI (default: download from Etherscan) + --contract-name Name of the contract (default: Contract) + --merge-entities Whether to merge entities with the same name (default: false) + --network-file Networks config file path (default: "./networks.json") +``` + +#### Specifics + +The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option) and creates a new `dataSource`, similar to how the `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. This allows you to index implementation contracts from their proxy contracts. + +- The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: + + - If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. + + - If `false`: a new `entity` & `event` handler should be created with `${dataSourceName}{EventName}`. + +- The contract `address` will be written to the `networks.json` for the relevant network. + +> Note: When using the interactive CLI, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. + +### Getting The ABIs + +The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: + +- If you are building your own project, you will likely have access to your most current ABIs. +- If you are building a Subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your Subgraph will fail. From e3caab855cacf51a7217c2039d0ab00fc5cd4184 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:02 -0500 Subject: [PATCH 0780/1789] New translations ql-schema.mdx (Romanian) --- .../developing/creating/ql-schema.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/ro/subgraphs/developing/creating/ql-schema.mdx index 27562f970620..2eb805320753 100644 --- a/website/src/pages/ro/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/ro/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## Overview -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Before defining entities, it is important to take a step back and think about how your data is structured and linked. -- All queries will be made against the data model defined in the subgraph schema. 
As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - It may be useful to imagine entities as "objects containing data", rather than as events or functions. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Type | Description | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. 
Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### Example @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. ### Adding comments to the schema @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## Languages supported From 2833236a5ab2678dfae39abaeba2c5bd3951e1b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:03 -0500 Subject: [PATCH 0781/1789] New translations ql-schema.mdx (French) --- .../developing/creating/ql-schema.mdx | 78 +++++++++---------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/fr/subgraphs/developing/creating/ql-schema.mdx index 0d6ae1beb2bf..c35fde106fa0 100644 --- a/website/src/pages/fr/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/fr/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: Schema The Graph QL ## Aperçu -Le schéma de votre subgraph se trouve dans le fichier `schema.graphql`. Les schémas GraphQL sont définis à l'aide du langage de définition d'interface GraphQL. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Remarque : si vous n'avez jamais écrit de schéma GraphQL, il est recommandé de consulter ce guide sur le système de types GraphQL. 
La documentation de référence pour les schémas GraphQL est disponible dans la section [API GraphQL](/subgraphs/querying/graphql-api/). @@ -12,7 +12,7 @@ Le schéma de votre subgraph se trouve dans le fichier `schema.graphql`. Les sch Avant de définir des entités, il est important de prendre du recul et de réfléchir à la manière dont vos données sont structurées et liées. -- Toutes les requêtes seront effectuées sur le modèle de données défini dans le schéma de subgraph. Par conséquent, la conception du schéma de subgraph doit être informée par les requêtes que votre application devra exécuter. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - Il peut être utile d'imaginer les entités comme des "objets contenant des données", plutôt que comme des événements ou des fonctions. - Vous définissez les types d'entités dans `schema.graphql`, et Graph Node générera des champs de premier niveau pour interroger des instances uniques et des collections de ce type d'entité. - Chaque type qui doit être une entité doit être annoté avec une directive `@entity`. @@ -72,16 +72,16 @@ Pour certains types d'entités, l'`id` de `Bytes!` est construit à partir des i Les scalaires suivants sont supportés dans l'API GraphQL : -| Type | Description | -| --- | --- | -| `Bytes` | Tableau d'octets, représenté sous forme de chaîne hexadécimale. Couramment utilisé pour les hachages et adresses Ethereum. | -| `String` | Scalaire pour les valeurs de type `string`. Les caractères nuls ne sont pas pris en charge et sont automatiquement supprimés. | -| `Boolean` | Scalaire pour les valeurs de type `boolean` (booléennes). | -| `Int` | La spécification GraphQL définit `Int` comme un entier signé de 32 bits. | -| `Int8` | Un entier signé de 8 octets, également connu sous le nom d'entier signé de 64 bits, peut stocker des valeurs comprises entre -9 223 372 036 854 775 808 et 9 223 372 036 854 775 807. Il est préférable de l'utiliser pour représenter `i64` de l'ethereum. | -| `BigInt` | Grands entiers. Utilisé pour les types Ethereum `uint32`, `int64`, `uint64`, ..., `uint256`. Note : Tout ce qui est inférieur à `uint32`, comme `int32`, `uint24` ou `int8` est représenté par `i32`. | -| `BigDecimal` | `BigDecimal` Décimales de haute précision représentées par un significatif et un exposant. L'exposant est compris entre -6143 et +6144. Arrondi à 34 chiffres significatifs. | -| `Timestamp` | Il s'agit d'une valeur `i64` en microsecondes. Couramment utilisé pour les champs `timestamp` des séries chronologiques et des agrégations. | +| Type | Description | +| ------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Tableau d'octets, représenté sous forme de chaîne hexadécimale. Couramment utilisé pour les hachages et adresses Ethereum. | +| `String` | Scalaire pour les valeurs de type `string`. Les caractères nuls ne sont pas pris en charge et sont automatiquement supprimés. | +| `Boolean` | Scalaire pour les valeurs de type `boolean` (booléennes). | +| `Int` | La spécification GraphQL définit `Int` comme un entier signé de 32 bits. 
| +| `Int8` | Un entier signé de 8 octets, également connu sous le nom d'entier signé de 64 bits, peut stocker des valeurs comprises entre -9 223 372 036 854 775 808 et 9 223 372 036 854 775 807. Il est préférable de l'utiliser pour représenter `i64` de l'ethereum. | +| `BigInt` | Grands entiers. Utilisé pour les types Ethereum `uint32`, `int64`, `uint64`, ..., `uint256`. Note : Tout ce qui est inférieur à `uint32`, comme `int32`, `uint24` ou `int8` est représenté par `i32`. | +| `BigDecimal` | `BigDecimal` Décimales de haute précision représentées par un significatif et un exposant. L'exposant est compris entre -6143 et +6144. Arrondi à 34 chiffres significatifs. | +| `Timestamp` | Il s'agit d'une valeur `i64` en microsecondes. Couramment utilisé pour les champs `timestamp` des séries chronologiques et des agrégations. | ### Enums @@ -141,7 +141,7 @@ type TokenBalance @entity { Les recherches inversées peuvent être définies sur une entité à travers le champ `@derivedFrom`. Cela crée un champ virtuel sur l'entité qui peut être interrogé mais qui ne peut pas être défini manuellement par l'intermédiaire de l'API des correspondances. Il est plutôt dérivé de la relation définie sur l'autre entité. Pour de telles relations, il est rarement utile de stocker les deux côtés de la relation, et l'indexation et les performances des requêtes seront meilleures si un seul côté est stocké et que l'autre est dérivé. -Pour les relations un-à-plusieurs, la relation doit toujours être stockée du côté « un » et le côté « plusieurs » doit toujours être dérivé. Stocker la relation de cette façon, plutôt que de stocker un tableau d'entités du côté « plusieurs », entraînera des performances considérablement meilleures pour l'indexation et l'interrogation du sous-graphe. En général, le stockage de tableaux d’entités doit être évité autant que possible. +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### Exemple @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -222,7 +222,7 @@ Cette approche nécessite que les requêtes descendent vers un niveau supplémen query usersWithOrganizations { users { organizations { - # ceci est une entité UserOrganization + # ceci est une entité UserOrganization organization { name } @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -Cette manière plus élaborée de stocker des relations plusieurs-à-plusieurs entraînera moins de données stockées pour le subgraph, et donc vers un subgraph qui est souvent considérablement plus rapide à indexer et à interroger. +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. 
### Ajouter des commentaires au schéma @@ -287,7 +287,7 @@ query { } ``` -> **[Gestion des fonctionnalités](#experimental-features):** A partir de `specVersion` `0.0.4`, `fullTextSearch` doit être déclaré dans la section `features` du manifeste du subgraph. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## Langues prises en charge @@ -295,30 +295,30 @@ Le choix d'une langue différente aura un effet définitif, bien que parfois sub Dictionnaires de langues pris en charge : -| Code | Dictionnaire | -| ------ | ------------ | -| simple | Général | -| da | Danois | -| nl | Néerlandais | -| en | Anglais | -| fi | Finlandais | -| fr | Français | -| de | Allemand | -| hu | Hongrois | -| it | Italien | -| no | Norvégien | -| pt | Portugais | -| ro | Roumain | -| ru | Russe | -| es | Espagnol | -| sv | Suédois | -| tr | Turc | +| Code | Dictionnaire | +| ------ | ---------------- | +| simple | Général | +| da | Danois | +| nl | Néerlandais | +| en | Anglais | +| fi | Finlandais | +| fr | Français | +| de | Allemand | +| hu | Hongrois | +| it | Italien | +| no | Norvégien | +| pt | Portugais | +| ro | Roumain | +| ru | Russe | +| es | Espagnol | +| sv | Suédois | +| tr | Turc | ### Algorithmes de classement Algorithmes de classement: -| Algorithme | Description | -| --- | --- | -| rank | Utilisez la qualité de correspondance (0-1) de la requête en texte intégral pour trier les résultats. | -| proximitéRang | Similaire au classement, mais inclut également la proximité des correspondances. | +| Algorithme | Description | +| -------------- | ----------------------------------------------------------------------------------------------------- | +| rank | Utilisez la qualité de correspondance (0-1) de la requête en texte intégral pour trier les résultats. | +| proximitéRang | Similaire au classement, mais inclut également la proximité des correspondances. | From 4c058c2c10b8d77a5bb1712d202460d961960f8c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:04 -0500 Subject: [PATCH 0782/1789] New translations ql-schema.mdx (Spanish) --- .../developing/creating/ql-schema.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/es/subgraphs/developing/creating/ql-schema.mdx index 09924401ce11..a8b800ca5635 100644 --- a/website/src/pages/es/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/es/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## Descripción -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Before defining entities, it is important to take a step back and think about how your data is structured and linked. 
-- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - It may be useful to imagine entities as "objects containing data", rather than as events or functions. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Tipo | Descripción | -| --- | --- | -| `Bytes` | Byte array, representado como un string hexadecimal. Comúnmente utilizado para los hashes y direcciones de Ethereum. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Tipo | Descripción | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, representado como un string hexadecimal. Comúnmente utilizado para los hashes y direcciones de Ethereum. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. 
This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. -En el caso de las relaciones one-to-many, la relación debe almacenarse siempre en el lado "one", y el lado "many" debe derivarse siempre. Almacenar la relación de esta manera, en lugar de almacenar una array de entidades en el lado "many", resultará en un rendimiento dramáticamente mejor tanto para la indexación como para la consulta del subgrafo. En general, debe evitarse, en la medida de lo posible, el almacenamiento de arrays de entidades. +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### Ejemplo @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -Esta forma más elaborada de almacenar las relaciones many-to-many se traducirá en menos datos almacenados para el subgrafo y, por tanto, en un subgrafo que suele ser mucho más rápido de indexar y consultar. +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. ### Agregar comentarios al esquema @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## Idiomas admitidos @@ -318,7 +318,7 @@ Diccionarios de idiomas admitidos: Algoritmos admitidos para ordenar los resultados: -| Algorithm | Description | -| --- | --- | -| rank | Usa la calidad de coincidencia (0-1) de la consulta de texto completo para ordenar los resultados. | -| rango de proximidad | Similar to rank but also includes the proximity of the matches. | +| Algorithm | Description | +| ------------------- | -------------------------------------------------------------------------------------------------- | +| rank | Usa la calidad de coincidencia (0-1) de la consulta de texto completo para ordenar los resultados. | +| rango de proximidad | Similar to rank but also includes the proximity of the matches. 
| From 93538ff0c12a1e0f343c371dd744f102a582fdb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:05 -0500 Subject: [PATCH 0783/1789] New translations ql-schema.mdx (Arabic) --- .../developing/creating/ql-schema.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/ar/subgraphs/developing/creating/ql-schema.mdx index 56d9abb39ae7..c5b869610abd 100644 --- a/website/src/pages/ar/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/ar/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## نظره عامة -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Before defining entities, it is important to take a step back and think about how your data is structured and linked. -- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - It may be useful to imagine entities as "objects containing data", rather than as events or functions. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| النوع | الوصف | -| --- | --- | -| `Bytes` | مصفوفة Byte ، ممثلة كسلسلة سداسية عشرية. يشيع استخدامها في Ethereum hashes وعناوينه. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. 
| +| النوع | الوصف | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | مصفوفة Byte ، ممثلة كسلسلة سداسية عشرية. يشيع استخدامها في Ethereum hashes وعناوينه. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### Example @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. 
### إضافة تعليقات إلى المخطط (schema) @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## اللغات المدعومة @@ -318,7 +318,7 @@ Supported language dictionaries: Supported algorithms for ordering results: -| Algorithm | Description | -| ------------- | --------------------------------------------------------------- | -| rank | استخدم جودة مطابقة استعلام النص-الكامل (0-1) لترتيب النتائج. | -| proximityRank | Similar to rank but also includes the proximity of the matches. | +| Algorithm | Description | +| ------------- | ----------------------------------------------------------------------- | +| rank | استخدم جودة مطابقة استعلام النص-الكامل (0-1) لترتيب النتائج. | +| proximityRank | Similar to rank but also includes the proximity of the matches. | From 23e9c1def016ab601752897cc7cf5a7055657f4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:06 -0500 Subject: [PATCH 0784/1789] New translations ql-schema.mdx (Czech) --- .../developing/creating/ql-schema.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/cs/subgraphs/developing/creating/ql-schema.mdx index c0a99bb516eb..dcc831244293 100644 --- a/website/src/pages/cs/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/cs/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## Přehled -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Before defining entities, it is important to take a step back and think about how your data is structured and linked. -- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - It may be useful to imagine entities as "objects containing data", rather than as events or functions. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. 
@@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Typ | Popis | -| --- | --- | -| `Bytes` | Pole bajtů reprezentované jako hexadecimální řetězec. Běžně se používá pro hashe a adresy Ethereum. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Typ | Popis | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Pole bajtů reprezentované jako hexadecimální řetězec. Běžně se používá pro hashe a adresy Ethereum. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. -U vztahů typu "jeden k mnoha" by měl být vztah vždy uložen na straně "jeden" a strana "mnoho" by měla být vždy odvozena. Uložení vztahu tímto způsobem namísto uložení pole entit na straně "mnoho" povede k výrazně lepšímu výkonu jak při indexování, tak při dotazování na podgraf. Obecně platí, že ukládání polí entit je třeba se vyhnout, pokud je to praktické. 
+For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### Příklad @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -Tento propracovanější způsob ukládání vztahů mnoho-více vede k menšímu množství dat uložených pro podgraf, a tedy k podgrafu, který je často výrazně rychlejší při indexování a dotazování. +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. ### Přidání komentářů do schématu @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## Podporované jazyky From 0d3b6e8f071bdbc00acf2fd0636f84a0daa4e177 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:07 -0500 Subject: [PATCH 0785/1789] New translations ql-schema.mdx (German) --- .../developing/creating/ql-schema.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/de/subgraphs/developing/creating/ql-schema.mdx index 7f0283d91f62..ab09a110ed23 100644 --- a/website/src/pages/de/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## Überblick -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Before defining entities, it is important to take a step back and think about how your data is structured and linked. -- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. 
- It may be useful to imagine entities as "objects containing data", rather than as events or functions. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Type | Beschreibung | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Type | Beschreibung | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. 
-For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### Beispiel @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. ### Adding comments to the schema @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## Languages supported From 0d3d8c558048f18dbadf1817bc5d276932f89e1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:08 -0500 Subject: [PATCH 0786/1789] New translations ql-schema.mdx (Italian) --- .../developing/creating/ql-schema.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/it/subgraphs/developing/creating/ql-schema.mdx index d3c22e25f97d..63cbca1acc72 100644 --- a/website/src/pages/it/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/it/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## Panoramica -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Before defining entities, it is important to take a step back and think about how your data is structured and linked. 
-- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - It may be useful to imagine entities as "objects containing data", rather than as events or functions. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Tipo | Descrizione | -| --- | --- | -| `Bytes` | Byte array, rappresentato come una stringa esadecimale. Comunemente utilizzato per gli hash e gli indirizzi di Ethereum. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Tipo | Descrizione | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, rappresentato come una stringa esadecimale. Comunemente utilizzato per gli hash e gli indirizzi di Ethereum. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enum @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. 
This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. -Per le relazioni uno-a-molti, la relazione deve sempre essere memorizzata sul lato "uno" e il lato "molti" deve sempre essere derivato. Memorizzare la relazione in questo modo, piuttosto che memorizzare un array di entità sul lato "molti", migliorerà notevolmente le prestazioni sia per l'indicizzazione che per l'interrogazione del subgraph. In generale, la memorizzazione di array di entità dovrebbe essere evitata per quanto possibile. +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### Esempio @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -Questo modo più elaborato di memorizzare le relazioni molti-a-molti si traduce in una minore quantità di dati memorizzati per il subgraph e quindi in un subgraph che spesso è molto più veloce da indicizzare e da effettuare query. +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. ### Aggiungere commenti allo schema @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## Lingue supportate From de3c4b4ae3cd693c2d718d29cdafb06502777a16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:10 -0500 Subject: [PATCH 0787/1789] New translations ql-schema.mdx (Japanese) --- .../developing/creating/ql-schema.mdx | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/ja/subgraphs/developing/creating/ql-schema.mdx index fb06d8d022a0..32b7c233efa2 100644 --- a/website/src/pages/ja/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/ja/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## 概要 -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. 
> Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Before defining entities, it is important to take a step back and think about how your data is structured and linked. -- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - It may be useful to imagine entities as "objects containing data", rather than as events or functions. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| タイプ | 説明書き | -| --- | --- | -| `Bytes` | Byte 配列で、16 進数の文字列で表されます。Ethereum のハッシュやアドレスによく使われます。 | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| タイプ | 説明書き | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte 配列で、16 進数の文字列で表されます。Ethereum のハッシュやアドレスによく使われます。 | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. 
The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. -1 対多の関係では、関係は常に「1」側に格納され、「多」側は常に派生されるべきです。「多」側にエンティティの配列を格納するのではなく、このように関係を格納することで、サブグラフのインデックス作成と問い合わせの両方で劇的にパフォーマンスが向上します。一般的に、エンティティの配列を保存することは、現実的に可能な限り避けるべきです。 +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### 例 @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -このように多対多の関係をより精巧に保存する方法では、サブグラフに保存されるデータが少なくなるため、サブグラフのインデックス作成や問い合わせが劇的に速くなります。 +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. ### スキーマへのコメントの追加 @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## 対応言語 @@ -296,29 +296,29 @@ query { サポートされている言語の辞書: | Code | 辞書 | -| ------ | ------------ | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | ポルトガル語 | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | +| ------ | ---------- | +| simple | General | +| da | Danish | +| nl | Dutch | +| en | English | +| fi | Finnish | +| fr | French | +| de | German | +| hu | Hungarian | +| it | Italian | +| no | Norwegian | +| pt | ポルトガル語 | +| ro | Romanian | +| ru | Russian | +| es | Spanish | +| sv | Swedish | +| tr | Turkish | ### ランキングアルゴリズム サポートされている結果の順序付けのアルゴリズム: -| Algorithm | Description | -| ------------- | ------------------------------------------------------------------- | -| rank | フルテキストクエリのマッチ品質 (0-1) を使用して結果を並べ替えます。 | -| proximityRank | Similar to rank but also includes the proximity of the matches. 
| +| Algorithm | Description | +| ------------- | ----------------------------------------------------------------------- | +| rank | フルテキストクエリのマッチ品質 (0-1) を使用して結果を並べ替えます。 | +| proximityRank | Similar to rank but also includes the proximity of the matches. | From 69f80d25d5b2c7d32179fe8afbb3fe6dc5357072 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:11 -0500 Subject: [PATCH 0788/1789] New translations ql-schema.mdx (Korean) --- .../developing/creating/ql-schema.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/ko/subgraphs/developing/creating/ql-schema.mdx index 27562f970620..2eb805320753 100644 --- a/website/src/pages/ko/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/ko/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## Overview -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Before defining entities, it is important to take a step back and think about how your data is structured and linked. -- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - It may be useful to imagine entities as "objects containing data", rather than as events or functions. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. 
The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Type | Description | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### Example @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. 
+This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. ### Adding comments to the schema @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## Languages supported From a2c102e9136849c1d47e723ec637db582aa9ff0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:12 -0500 Subject: [PATCH 0789/1789] New translations ql-schema.mdx (Dutch) --- .../developing/creating/ql-schema.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/nl/subgraphs/developing/creating/ql-schema.mdx index 27562f970620..2eb805320753 100644 --- a/website/src/pages/nl/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/nl/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## Overview -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Before defining entities, it is important to take a step back and think about how your data is structured and linked. -- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - It may be useful to imagine entities as "objects containing data", rather than as events or functions. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. 
| -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Type | Description | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. 
#### Example @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. ### Adding comments to the schema @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## Languages supported From b31de60566dbc878a973d3b449e9e18a5b28674f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:13 -0500 Subject: [PATCH 0790/1789] New translations ql-schema.mdx (Polish) --- .../developing/creating/ql-schema.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/pl/subgraphs/developing/creating/ql-schema.mdx index 27562f970620..2eb805320753 100644 --- a/website/src/pages/pl/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/pl/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## Overview -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Before defining entities, it is important to take a step back and think about how your data is structured and linked. -- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - It may be useful to imagine entities as "objects containing data", rather than as events or functions. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. 
@@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Type | Description | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. 
+For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### Example @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. ### Adding comments to the schema @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## Languages supported From de643935007ce7e069c83fccf58609544c0c8a7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:14 -0500 Subject: [PATCH 0791/1789] New translations ql-schema.mdx (Portuguese) --- .../developing/creating/ql-schema.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/pt/subgraphs/developing/creating/ql-schema.mdx index db1f1f513082..cdf5173a9bfa 100644 --- a/website/src/pages/pt/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/pt/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## Visão geral -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Before defining entities, it is important to take a step back and think about how your data is structured and linked. -- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. 
- It may be useful to imagine entities as "objects containing data", rather than as events or functions. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Tipo | Descrição | -| --- | --- | -| `Bytes` | Arranjo de bytes, representado como string hexadecimal. Usado frequentemente por hashes e endereços no Ethereum. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Tipo | Descrição | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Arranjo de bytes, representado como string hexadecimal. Usado frequentemente por hashes e endereços no Ethereum. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. 
-Para relacionamentos um-com-vários, o relacionamento sempre deve ser armazenado no lado 'um', e o lado 'vários' deve sempre ser derivado. Armazenar o relacionamento desta maneira, em vez de armazenar um arranjo de entidades no lado 'vários', melhorará dramaticamente o desempenho para o indexing e os queries no subgraph. Em geral, evite armazenar arranjos de entidades enquanto for prático. +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### Exemplo @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -Esta maneira mais elaborada de armazenar relacionamentos vários-com-vários armazenará menos dados para o subgraph, portanto, o subgraph ficará muito mais rápido de indexar e consultar. +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. ### Como adicionar comentários ao schema @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## Idiomas apoiados From 50e35dee5114a7ddf7aa2e64b59e99759aa984d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:15 -0500 Subject: [PATCH 0792/1789] New translations ql-schema.mdx (Russian) --- .../developing/creating/ql-schema.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/ru/subgraphs/developing/creating/ql-schema.mdx index fb468f6110f5..563399d2aec7 100644 --- a/website/src/pages/ru/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/ru/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## Обзор -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Прежде чем определять объекты, важно сделать шаг назад и задуматься над тем, как структурированы и связаны Ваши данные. -- All queries will be made against the data model defined in the subgraph schema. 
As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - Может быть полезно представить объекты как «объекты, содержащие данные», а не как события или функции. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two В API GraphQL поддерживаются следующие скаляры: -| Тип | Описание | -| --- | --- | -| `Bytes` | Массив байтов, представленный в виде шестнадцатеричной строки. Обычно используется для хэшей и адресов Ethereum. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Тип | Описание | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Массив байтов, представленный в виде шестнадцатеричной строки. Обычно используется для хэшей и адресов Ethereum. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Перечисления @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. 
Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. -Для связей "один ко многим" связь всегда должна храниться на стороне "один", а сторона "многие" всегда должна быть производной. Такое сохранение связи, вместо хранения массива объектов на стороне "многие", приведет к значительному повышению производительности как при индексации, так и при запросах к субграфам. В общем, следует избегать хранения массивов объектов настолько, насколько это возможно. +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### Пример @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -Такой более сложный способ хранения связей "многие ко многим" приведет к уменьшению объема хранимых данных для субграфа и, следовательно, к тому, что субграф будет значительно быстрее индексироваться и запрашиваться. +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. ### Добавление комментариев к схеме @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## Поддерживаемые языки From a1e4711bfb39d249173588aab030496e944bae7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:16 -0500 Subject: [PATCH 0793/1789] New translations ql-schema.mdx (Swedish) --- .../developing/creating/ql-schema.mdx | 75 ++++++++++--------- 1 file changed, 40 insertions(+), 35 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/sv/subgraphs/developing/creating/ql-schema.mdx index 426092a76eb4..fd4d6fe36903 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## Översikt -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. 
@@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Before defining entities, it is important to take a step back and think about how your data is structured and linked. -- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - It may be useful to imagine entities as "objects containing data", rather than as events or functions. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Typ | Beskrivning | -| --- | --- | -| `Bytes` | Bytematris, representerad som en hexadecimal sträng. Vanligt används för Ethereum-hashar och adresser. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Typ | Beskrivning | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Bytematris, representerad som en hexadecimal sträng. Vanligt används för Ethereum-hashar och adresser. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. 
Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. -För en-till-många-relationer bör relationen alltid lagras på 'en'-sidan, och 'många'-sidan bör alltid härledas. Att lagra relationen på detta sätt, istället för att lagra en array av entiteter på 'många'-sidan, kommer att resultera i dramatiskt bättre prestanda både för indexering och för frågning av subgraphen. Generellt sett bör lagring av arrayer av entiteter undvikas så mycket som är praktiskt möjligt. +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### Exempel @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -Detta mer avancerade sätt att lagra många-till-många-relationer kommer att leda till att mindre data lagras för subgrafen, och därför till en subgraf som ofta är dramatiskt snabbare att indexera och att fråga. +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. ### Lägga till kommentarer i schemat @@ -259,7 +259,12 @@ type _Schema_ name: "bandSearch" language: en algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] + include: [ + { + entity: "Band" + fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] + } + ] ) type Band @entity { @@ -287,7 +292,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. 
## Stödda språk @@ -295,24 +300,24 @@ Att välja ett annat språk kommer att ha en definitiv, om än ibland subtil, ef Stödda språkordböcker: -| Code | Ordbok | -| ----- | ------------ | -| enkel | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portugisiska | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | +| Code | Ordbok | +| ------ | ------------ | +| enkel | General | +| da | Danish | +| nl | Dutch | +| en | English | +| fi | Finnish | +| fr | French | +| de | German | +| hu | Hungarian | +| it | Italian | +| no | Norwegian | +| pt | Portugisiska | +| ro | Romanian | +| ru | Russian | +| es | Spanish | +| sv | Swedish | +| tr | Turkish | ### Rankningsalgoritmer From f7ab6aea7fe129e6ca60baaa33ba356ddc69c2d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:17 -0500 Subject: [PATCH 0794/1789] New translations ql-schema.mdx (Turkish) --- .../developing/creating/ql-schema.mdx | 68 +++++++++---------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/tr/subgraphs/developing/creating/ql-schema.mdx index 3aedce85e696..021bdc7326e6 100644 --- a/website/src/pages/tr/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/tr/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## Genel Bakış -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Varlıkları tanımlamadan önce, verilerinizin nasıl yapılandırıldığını ve nasıl bağlantılı olduğunu düşünmek önemlidir. -- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - Varlıkları, olaylar veya fonksiyonlar yerine “veri içeren nesneler” olarak düşünmek faydalı olabilir. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two GraphQL API'sinde desteklenen skalarlardan bazıları şunlardır: -| Tür | Tanım | -| --- | --- | -| `Bytes` | Byte dizisi, onaltılık bir dizgi olarak temsil edilir. Ethereum hash değerleri ve adresleri için yaygın olarak kullanılır. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. 
| -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Tür | Tanım | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte dizisi, onaltılık bir dizgi olarak temsil edilir. Ethereum hash değerleri ve adresleri için yaygın olarak kullanılır. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Numaralandırmalar @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. -Birden çoğa ilişkileriiçin, ilişki her zaman 'birden' tarafında depolanmalı ve her zaman 'çoğa' tarafında türetilmelidir. İlişkinin 'çoğa' tarafında bir dizi varlık depolamak yerine bu şekilde saklanması, subgraph indeksleme ve sorgulaması adına önemli ölçüde daha iyi performans sağlayacaktır. Genel olarak, varlık dizilerini depolamaktan mümkün olduğunca sakınılması gerekmektedir. +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. 
#### Örnek @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -Çoktan çoğa ilişkileri depolamanın daha ayrıntılı bu yolu, subgraph için depolanan veri miktarının azalmasına ve bu sonucunda genellikle indekslenmesi ve sorgulanması önemli ölçüde daha hızlı olan bir subgraph sağlayacaktır. +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. ### Şemaya notlar/yorumlar ekleme @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## Desteklenen diller @@ -295,24 +295,24 @@ Farklı bir dil seçmek, tam metin arama API'sı üzerinde bazen az olsa da kesi Desteklenen dil sözlükleri: -| Code | Sözlük | -| ----- | ---------- | -| yalın | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portekizce | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | +| Code | Sözlük | +| ------ | ---------- | +| yalın | General | +| da | Danish | +| nl | Dutch | +| en | English | +| fi | Finnish | +| fr | French | +| de | German | +| hu | Hungarian | +| it | Italian | +| no | Norwegian | +| pt | Portekizce | +| ro | Romanian | +| ru | Russian | +| es | Spanish | +| sv | Swedish | +| tr | Turkish | ### Algoritmaları Sıralama From 479e4839af07f42ee8a81da22fd050c0d814246b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:18 -0500 Subject: [PATCH 0795/1789] New translations ql-schema.mdx (Ukrainian) --- .../developing/creating/ql-schema.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/uk/subgraphs/developing/creating/ql-schema.mdx index 27562f970620..2eb805320753 100644 --- a/website/src/pages/uk/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/uk/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## Overview -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Before defining entities, it is important to take a step back and think about how your data is structured and linked. 
-- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - It may be useful to imagine entities as "objects containing data", rather than as events or functions. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Type | Description | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. 
This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### Example @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. ### Adding comments to the schema @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## Languages supported From 819976d1b9c9f22aad184915e437551f865ca4c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:19 -0500 Subject: [PATCH 0796/1789] New translations ql-schema.mdx (Chinese Simplified) --- .../developing/creating/ql-schema.mdx | 78 +++++++++---------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/zh/subgraphs/developing/creating/ql-schema.mdx index 6e24e832c85d..2dbb3e2a8991 100644 --- a/website/src/pages/zh/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/zh/subgraphs/developing/creating/ql-schema.mdx @@ -2,9 +2,9 @@ title: The Graph QL Schema --- -## 概述 +## Overview -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. 
> Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Before defining entities, it is important to take a step back and think about how your data is structured and linked. -- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - It may be useful to imagine entities as "objects containing data", rather than as events or functions. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| 类型 | 描述 | -| --- | --- | -| `Bytes` | 字节数组,表示为十六进制字符串。 通常用于以太坊hash和地址。 | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| 类型 | 描述 | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | 字节数组,表示为十六进制字符串。 通常用于以太坊hash和地址。 | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. 
| +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### 枚举类型 @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. -对于一对多关系,关系应始终存储在“一”端,而“多”端应始终派生。 以这种方式存储关系,而不是在“多”端存储实体数组,将大大提高索引和查询子图的性能。 通常,应尽可能避免存储实体数组。 +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### 示例 @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -这种存储多对多关系的更精细的方式将导致为子图存储的数据更少,因此子图的索引和查询速度通常会大大加快。 +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. ### 向模式添加注释 @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## 支持的语言 @@ -295,30 +295,30 @@ query { 支持的语言词典: -| Code | 词典 | -| ------ | --------- | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | 葡萄牙语 | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | +| Code | 词典 | +| ------ | ---------- | +| simple | General | +| da | Danish | +| nl | Dutch | +| en | English | +| fi | Finnish | +| fr | French | +| de | German | +| hu | Hungarian | +| it | Italian | +| no | Norwegian | +| pt | 葡萄牙语 | +| ro | Romanian | +| ru | Russian | +| es | Spanish | +| sv | Swedish | +| tr | Turkish | ### 排序算法 支持的排序结果算法: -| Algorithm | Description | -| ------------- | --------------------------------------------------------------- | -| rank | 使用全文查询的匹配质量 (0-1) 对结果进行排序。 | -| proximityRank | Similar to rank but also includes the proximity of the matches. | +| Algorithm | Description | +| ------------- | ----------------------------------------------------------------------- | +| rank | 使用全文查询的匹配质量 (0-1) 对结果进行排序。 | +| proximityRank | Similar to rank but also includes the proximity of the matches. 
| From 4bec1b496fb8e0a1f83095214d4d9f24ea35174b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:20 -0500 Subject: [PATCH 0797/1789] New translations ql-schema.mdx (Urdu (Pakistan)) --- .../developing/creating/ql-schema.mdx | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/ur/subgraphs/developing/creating/ql-schema.mdx index 833aee6e0499..7cecd1e9e746 100644 --- a/website/src/pages/ur/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/ur/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## جائزہ -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Before defining entities, it is important to take a step back and think about how your data is structured and linked. -- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - It may be useful to imagine entities as "objects containing data", rather than as events or functions. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| قسم | تفصیل | -| --- | --- | -| `Bytes` | Byte array، ایک ہیکساڈیسیمل سٹرنگ کے طور پر پیش کیا جاتا ہے. عام طور پر Ethereum hashes اور ایڈریسیس کے لیے استعمال ہوتا ہے. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. 
| +| قسم | تفصیل | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array، ایک ہیکساڈیسیمل سٹرنگ کے طور پر پیش کیا جاتا ہے. عام طور پر Ethereum hashes اور ایڈریسیس کے لیے استعمال ہوتا ہے. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. -ون-ٹو-مینی تعلقات کے لیے، تعلق کو ہمیشہ 'ون' سائیڈ پر رکھنا چاہیے، اور 'مینی' سائیڈ کو ہمیشہ اخذ کیا جانا چاہیے۔ 'مینی' سائیڈ پر ہستیوں کی ایک ایرے کو ذخیرہ کرنے کے بجائے اس طرح سے تعلق کو ذخیرہ کرنے کے نتیجے میں سب گراف کی انڈیکسنگ اور کیوریز دونوں کے لیے نمایاں طور پر بہتر کارکردگی ہوگی۔ عام طور پر، ہستیوں کی ایریز کو ذخیرہ کرنے سے اتنا ہی گریز کیا جانا چاہیے جتنا کہ عملی ہو. +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### مثال @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -Many-to-many تعلقات کو ذخیرہ کرنے کے اس زیادہ وسیع طریقے کے نتیجے میں سب گراف کے لیے کم ڈیٹا ذخیرہ کیا جائے گا، اور اس لیے ایک سب گراف میں جو اکثر نمایاں طور پر انڈیکس اور کیوری کے لیے تیز تر ہوتا ہے. +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. 
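
As an aside, here is a minimal AssemblyScript sketch of how a mapping might populate the `UserOrganization` link entity described above. The `linkUserToOrganization` helper, and the place it would be called from, are assumptions for illustration only (it presumes the `User`/`Organization`/`UserOrganization` schema shown earlier and the usual `graph codegen` output), not part of the documented example:

```typescript
import { Bytes } from '@graphprotocol/graph-ts'
import { UserOrganization } from '../generated/schema'

// Hypothetical helper, called from whichever event handler observes
// that a user has joined an organization.
export function linkUserToOrganization(userId: Bytes, orgId: Bytes): void {
  // The id is `user.id.concat(organization.id)`, as noted in the schema comment above
  let link = new UserOrganization(userId.concat(orgId))
  link.user = userId
  link.organization = orgId
  link.save()
  // `User.organizations` and `Organization.members` are @derivedFrom fields,
  // so they are resolved at query time and are never set in the mapping.
}
```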
### اسکیما میں کامینٹس شامل کرنا @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## تعاون یافتہ زبانیں ہیں @@ -318,7 +318,7 @@ query { نتائج ترتیب دینے کے لیے معاون الگورتھم: -| Algorithm | Description | -| ------------ | --------------------------------------------------------------------------- | -| rank | نتائج ترتیب دینے کے لیے فل ٹیکسٹ کیوری کے میچ کوالٹی (1-0) کا استعمال کریں. | -| قربت کا درجہ | Similar to rank but also includes the proximity of the matches. | +| Algorithm | Description | +| ------------- | --------------------------------------------------------------------------- | +| rank | نتائج ترتیب دینے کے لیے فل ٹیکسٹ کیوری کے میچ کوالٹی (1-0) کا استعمال کریں. | +| قربت کا درجہ | Similar to rank but also includes the proximity of the matches. | From 42a83e3449834df8e168bbfe3d6c945410050ab1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:21 -0500 Subject: [PATCH 0798/1789] New translations ql-schema.mdx (Vietnamese) --- .../developing/creating/ql-schema.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/vi/subgraphs/developing/creating/ql-schema.mdx index e0b62e6f5e8d..a24817836bc8 100644 --- a/website/src/pages/vi/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/vi/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## Tổng quan -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Before defining entities, it is important to take a step back and think about how your data is structured and linked. -- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - It may be useful to imagine entities as "objects containing data", rather than as events or functions. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. 
@@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Loại | Miêu tả | -| --- | --- | -| `Bytes` | Mảng byte, được biểu diễn dưới dạng chuỗi thập lục phân. Thường được sử dụng cho các mã băm và địa chỉ Ethereum. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Loại | Miêu tả | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Mảng byte, được biểu diễn dưới dạng chuỗi thập lục phân. Thường được sử dụng cho các mã băm và địa chỉ Ethereum. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. 
+For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### Ví dụ @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. ### Adding comments to the schema @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## Các ngôn ngữ được hỗ trợ From f18ac95a4fd5a2a05c52d0559364667bb36f72bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:22 -0500 Subject: [PATCH 0799/1789] New translations ql-schema.mdx (Marathi) --- .../developing/creating/ql-schema.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/mr/subgraphs/developing/creating/ql-schema.mdx index 0e96ef80d066..6af6f1fe497d 100644 --- a/website/src/pages/mr/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/mr/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## सविश्लेषण -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar Before defining entities, it is important to take a step back and think about how your data is structured and linked. -- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. 
- It may be useful to imagine entities as "objects containing data", rather than as events or functions. - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| प्रकार | वर्णन | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| प्रकार | वर्णन | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### एनम्स @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. 
-एक-ते-अनेक संबंधांसाठी, संबंध नेहमी 'एका' बाजूला साठवले पाहिजेत आणि 'अनेक' बाजू नेहमी काढल्या पाहिजेत. 'अनेक' बाजूंवर संस्थांचा अ‍ॅरे संचयित करण्याऐवजी अशा प्रकारे नातेसंबंध संचयित केल्याने, अनुक्रमणिका आणि सबग्राफ क्वेरी या दोन्हीसाठी नाटकीयरित्या चांगले कार्यप्रदर्शन होईल. सर्वसाधारणपणे, घटकांचे अ‍ॅरे संग्रहित करणे जितके व्यावहारिक आहे तितके टाळले पाहिजे. +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### उदाहरण @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -अनेक-ते-अनेक संबंध संचयित करण्याच्या या अधिक विस्तृत मार्गामुळे सबग्राफसाठी कमी डेटा संग्रहित केला जाईल आणि म्हणूनच अनुक्रमणिका आणि क्वेरीसाठी नाटकीयरित्या वेगवान असलेल्या सबग्राफमध्ये. +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. ### स्कीमामध्ये टिप्पण्या जोडत आहे @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## भाषा समर्थित @@ -295,30 +295,30 @@ query { समर्थित भाषा शब्दकोश: -| Code | शब्दकोश | -| ---- | --------- | -| सोपे | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | पोर्तुगीज | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | +| Code | शब्दकोश | +| ------ | ---------- | +| सोपे | General | +| da | Danish | +| nl | Dutch | +| en | English | +| fi | Finnish | +| fr | French | +| de | German | +| hu | Hungarian | +| it | Italian | +| no | Norwegian | +| pt | पोर्तुगीज | +| ro | Romanian | +| ru | Russian | +| es | Spanish | +| sv | Swedish | +| tr | Turkish | ### रँकिंग अल्गोरिदम परिणाम ऑर्डर करण्यासाठी समर्थित अल्गोरिदम: -| Algorithm | Description | -| ------------- | ---------------------------------------------------------------------- | -| rank | निकाल ऑर्डर करण्यासाठी फुलटेक्स्ट क्वेरीची जुळणी गुणवत्ता (0-1) वापरा. | -| proximityRank | Similar to rank but also includes the proximity of the matches. | +| Algorithm | Description | +| ------------- | ----------------------------------------------------------------------- | +| rank | निकाल ऑर्डर करण्यासाठी फुलटेक्स्ट क्वेरीची जुळणी गुणवत्ता (0-1) वापरा. | +| proximityRank | Similar to rank but also includes the proximity of the matches. 
| From cd6ab8ad0f55ef99b3cbc70c95dfcf4fd8ba67d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:23 -0500 Subject: [PATCH 0800/1789] New translations ql-schema.mdx (Hindi) --- .../developing/creating/ql-schema.mdx | 68 +++++++++---------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/hi/subgraphs/developing/creating/ql-schema.mdx index 5c2b1f2037bc..1b53f4fac46c 100644 --- a/website/src/pages/hi/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/hi/subgraphs/developing/creating/ql-schema.mdx @@ -4,7 +4,7 @@ title: The Graph QL Schema ## अवलोकन -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. > Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. @@ -12,7 +12,7 @@ The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas ar इससे पहले कि आप एन्टिटीज को परिभाषित करें, यह महत्वपूर्ण है कि आप एक कदम पीछे हटें और सोचें कि आपका डेटा कैसे संरचित और लिंक किया गया है। -- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. - यह उपयोगी हो सकता है कि संस्थाओं की कल्पना 'डेटा' समाहित करने वाले वस्तुओं के रूप में की जाए, न कि घटनाओं या कार्यों के रूप में। - You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. - Each type that should be an entity is required to be annotated with an `@entity` directive. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two नीचे दिए गए स्केलर्स GraphQL API में समर्थित हैं: -| प्रकार | Description | -| --- | --- | -| `Bytes` | बाइट सरणी, एक हेक्साडेसिमल स्ट्रिंग के रूप में दर्शाया गया है। आमतौर पर एथेरियम हैश और पतों के लिए उपयोग किया जाता है। | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. 
Commonly used for `timestamp` fields for timeseries and aggregations. | +| प्रकार | Description | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | बाइट सरणी, एक हेक्साडेसिमल स्ट्रिंग के रूप में दर्शाया गया है। आमतौर पर एथेरियम हैश और पतों के लिए उपयोग किया जाता है। | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -141,7 +141,7 @@ type TokenBalance @entity { Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. -एक-से-अनेक संबंधों के लिए, संबंध को हमेशा 'एक' पक्ष में संग्रहीत किया जाना चाहिए, और 'अनेक' पक्ष हमेशा निकाला जाना चाहिए। संबंधों को इस तरह से संग्रहीत करने के बजाय, 'अनेक' पक्ष पर संस्थाओं की एक सरणी संग्रहीत करने के परिणामस्वरूप, सबग्राफ को अनुक्रमित करने और क्वेरी करने दोनों के लिए नाटकीय रूप से बेहतर प्रदर्शन होगा। सामान्य तौर पर, संस्थाओं की सरणियों को संग्रहीत करने से जितना संभव हो उतना बचा जाना चाहिए। +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. #### उदाहरण @@ -160,7 +160,7 @@ type TokenBalance @entity { } ``` -Here is an example of how to write a mapping for a subgraph with reverse lookups: +Here is an example of how to write a mapping for a Subgraph with reverse lookups: ```typescript let token = new Token(event.address) // Create Token @@ -231,7 +231,7 @@ query usersWithOrganizations { } ``` -मैनी-टू-मैनी संबंधों को संग्रहीत करने के इस अधिक विस्तृत तरीके के परिणामस्वरूप सबग्राफ के लिए कम डेटा संग्रहीत होगा, और इसलिए एक सबग्राफ के लिए जो अक्सर इंडेक्स और क्वेरी के लिए नाटकीय रूप से तेज़ होता है। +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. 
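
As a complementary sketch (assuming the same `UserOrganization` mapping-table schema and generated entity classes), removing a membership is just deleting the link entity; the helper name below is hypothetical:

```typescript
import { Bytes, store } from '@graphprotocol/graph-ts'
import { UserOrganization } from '../generated/schema'

// Hypothetical helper, called when a user leaves an organization.
export function unlinkUserFromOrganization(userId: Bytes, orgId: Bytes): void {
  let id = userId.concat(orgId) // same id scheme used when the link was created
  if (UserOrganization.load(id) != null) {
    // Removing the link entity is enough: the derived `members` and
    // `organizations` fields update automatically at query time.
    store.remove('UserOrganization', id.toHexString())
  }
}
```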
### स्कीमा में टिप्पणियां जोड़ना @@ -287,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. ## भाषाओं का समर्थन किया @@ -295,24 +295,24 @@ query { समर्थित भाषा शब्दकोश: -| Code | शब्दकोष | -| ------ | --------- | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | पुर्तगाली | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | +| Code | शब्दकोष | +| ------ | ---------- | +| simple | General | +| da | Danish | +| nl | Dutch | +| en | English | +| fi | Finnish | +| fr | French | +| de | German | +| hu | Hungarian | +| it | Italian | +| no | Norwegian | +| pt | पुर्तगाली | +| ro | Romanian | +| ru | Russian | +| es | Spanish | +| sv | Swedish | +| tr | Turkish | ### रैंकिंग एल्गोरिदम From ca01d2ff368e6ece0d9b0da1eb6360a2763cb9c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:24 -0500 Subject: [PATCH 0801/1789] New translations ql-schema.mdx (Swahili) --- .../developing/creating/ql-schema.mdx | 324 ++++++++++++++++++ 1 file changed, 324 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/developing/creating/ql-schema.mdx diff --git a/website/src/pages/sw/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/sw/subgraphs/developing/creating/ql-schema.mdx new file mode 100644 index 000000000000..2eb805320753 --- /dev/null +++ b/website/src/pages/sw/subgraphs/developing/creating/ql-schema.mdx @@ -0,0 +1,324 @@ +--- +title: The Graph QL Schema +--- + +## Overview + +The schema for your Subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. + +> Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. + +### Defining Entities + +Before defining entities, it is important to take a step back and think about how your data is structured and linked. + +- All queries will be made against the data model defined in the Subgraph schema. As a result, the design of the Subgraph schema should be informed by the queries that your application will need to perform. +- It may be useful to imagine entities as "objects containing data", rather than as events or functions. +- You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. +- Each type that should be an entity is required to be annotated with an `@entity` directive. +- By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. + - Mutability comes at a price, so for entity types that will never be modified, such as those containing data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. 
+ - If changes happen in the same block in which the entity was created, then mappings can make changes to immutable entities. Immutable entities are much faster to write and to query so they should be used whenever possible. + +#### Good Example + +The following `Gravatar` entity is structured around a Gravatar object and is a good example of how an entity could be defined. + +```graphql +type Gravatar @entity(immutable: true) { + id: Bytes! + owner: Bytes + displayName: String + imageUrl: String + accepted: Boolean +} +``` + +#### Bad Example + +The following example `GravatarAccepted` and `GravatarDeclined` entities are based around events. It is not recommended to map events or function calls to entities 1:1. + +```graphql +type GravatarAccepted @entity { + id: Bytes! + owner: Bytes + displayName: String + imageUrl: String +} + +type GravatarDeclined @entity { + id: Bytes! + owner: Bytes + displayName: String + imageUrl: String +} +``` + +#### Optional and Required Fields + +Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. If the field is a scalar field, you get an error when you try to store the entity. If the field references another entity then you get this error: + +``` +Null value resolved for non-null field 'name' +``` + +Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query as those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. + +For some entity types the `id` for `Bytes!` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id) ` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. + +### Built-In Scalar Types + +#### GraphQL Supported Scalars + +The following scalars are supported in the GraphQL API: + +| Type | Description | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. 
The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | + +### Enums + +You can also create enums within a schema. Enums have the following syntax: + +```graphql +enum TokenStatus { + OriginalOwner + SecondOwner + ThirdOwner +} +``` + +Once the enum is defined in the schema, you can use the string representation of the enum value to set an enum field on an entity. For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. The example below demonstrates what the Token entity would look like with an enum field: + +More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). + +### Entity Relationships + +An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. + +Relationships are defined on entities just like any other field except that the type specified is that of another entity. + +#### One-To-One Relationships + +Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: + +```graphql +type Transaction @entity(immutable: true) { + id: Bytes! + transactionReceipt: TransactionReceipt +} + +type TransactionReceipt @entity(immutable: true) { + id: Bytes! + transaction: Transaction +} +``` + +#### One-To-Many Relationships + +Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: + +```graphql +type Token @entity(immutable: true) { + id: Bytes! +} + +type TokenBalance @entity { + id: Bytes! + amount: Int! + token: Token! +} +``` + +### Reverse Lookups + +Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. + +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the Subgraph. In general, storing arrays of entities should be avoided as much as is practical. + +#### Example + +We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: + +```graphql +type Token @entity(immutable: true) { + id: Bytes! + tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") +} + +type TokenBalance @entity { + id: Bytes! + amount: Int! + token: Token! 
+} +``` + +Here is an example of how to write a mapping for a Subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + +#### Many-To-Many Relationships + +For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. + +#### Example + +Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. + +```graphql +type Organization @entity { + id: Bytes! + name: String! + members: [User!]! +} + +type User @entity { + id: Bytes! + name: String! + organizations: [Organization!]! @derivedFrom(field: "members") +} +``` + +A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like + +```graphql +type Organization @entity { + id: Bytes! + name: String! + members: [UserOrganization!]! @derivedFrom(field: "organization") +} + +type User @entity { + id: Bytes! + name: String! + organizations: [UserOrganization!] @derivedFrom(field: "user") +} + +type UserOrganization @entity { + id: Bytes! # Set to `user.id.concat(organization.id)` + user: User! + organization: Organization! +} +``` + +This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: + +```graphql +query usersWithOrganizations { + users { + organizations { + # this is a UserOrganization entity + organization { + name + } + } + } +} +``` + +This more elaborate way of storing many-to-many relationships will result in less data stored for the Subgraph, and therefore to a Subgraph that is often dramatically faster to index and to query. + +### Adding comments to the schema + +As per GraphQL spec, comments can be added above schema entity attributes using the hash symbol `#`. This is illustrated in the example below: + +```graphql +type MyFirstEntity @entity { + # unique identifier and primary key of the entity + id: Bytes! + address: Bytes! +} +``` + +## Defining Fulltext Search Fields + +Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. + +A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. + +To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. 
+ +```graphql +type _Schema_ + @fulltext( + name: "bandSearch" + language: en + algorithm: rank + include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] + ) + +type Band @entity { + id: Bytes! + name: String! + description: String! + bio: String + wallet: Address + labels: [Label!]! + discography: [Album!]! + members: [Musician!]! +} +``` + +The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/subgraphs/querying/graphql-api/#queries) for a description of the fulltext search API and more example usage. + +```graphql +query { + bandSearch(text: "breaks & electro & detroit") { + id + name + description + wallet + } +} +``` + +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the Subgraph manifest. + +## Languages supported + +Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". + +Supported language dictionaries: + +| Code | Dictionary | +| ------ | ---------- | +| simple | General | +| da | Danish | +| nl | Dutch | +| en | English | +| fi | Finnish | +| fr | French | +| de | German | +| hu | Hungarian | +| it | Italian | +| no | Norwegian | +| pt | Portuguese | +| ro | Romanian | +| ru | Russian | +| es | Spanish | +| sv | Swedish | +| tr | Turkish | + +### Ranking Algorithms + +Supported algorithms for ordering results: + +| Algorithm | Description | +| ------------- | ----------------------------------------------------------------------- | +| rank | Use the match quality (0-1) of the fulltext query to order the results. | +| proximityRank | Similar to rank but also includes the proximity of the matches. | From 255de031124ad2e1434fc8c3c28ad89f4777781e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:26 -0500 Subject: [PATCH 0802/1789] New translations subgraph-manifest.mdx (Romanian) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/ro/subgraphs/developing/creating/subgraph-manifest.mdx index a42a50973690..428ff5332baf 100644 --- a/website/src/pages/ro/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/ro/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## Overview -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. 
-The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -A single subgraph can: +A single Subgraph can: - Index data from multiple smart contracts (but not multiple networks). @@ -24,9 +24,9 @@ A single subgraph can: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). The important entries to update for the manifest are: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. 
+- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. +- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. 
+A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Event Handlers -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Defining an Event Handler -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## Call Handlers -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. 
These are far more performant than call handlers, and are supported on every EVM network.

### Defining a Call Handler

@@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han

### Mapping Function

-Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument:
+Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument:

```typescript
import { CreateGravatarCall } from '../generated/Gravity/Gravity'
@@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a

## Block Handlers

-In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter.
+In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this, a Subgraph can run a function after every block or after blocks that match a pre-defined filter.

### Supported Filters

@@ -218,7 +218,7 @@ filter:

_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._

-> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing.
+> **Note:** The `call` filter currently depends on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, do not support this API. If a Subgraph indexing one of these networks contains one or more block handlers with a `call` filter, it will not start syncing.

The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type.

@@ -261,7 +261,7 @@ blockHandlers:
      every: 10
```

-The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals.
+The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals.

#### Once Filter

@@ -276,7 +276,7 @@ blockHandlers:
      kind: once
```

-The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing.
+The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing.
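Before the documentation's own `handleOnce` snippet below, here is a sketch of what the mapping side of the polling filter configured above (`every: 10`) could look like. It is only an illustration, not part of the original docs: the `BlockSnapshot` entity, its fields, and the `handleBlock` name are hypothetical and would need to be declared in `schema.graphql` and the manifest before `graph codegen` produces the corresponding import.

```typescript
import { ethereum } from '@graphprotocol/graph-ts'
// Hypothetical entity; it must be added to schema.graphql (id: ID!, number: BigInt!, timestamp: BigInt!).
import { BlockSnapshot } from '../generated/schema'

// Called once for every block matched by the polling filter (every 10 blocks in the manifest above).
export function handleBlock(block: ethereum.Block): void {
  let snapshot = new BlockSnapshot(block.hash.toHexString())
  snapshot.number = block.number
  snapshot.timestamp = block.timestamp
  snapshot.save()
}
```

The initialization (`once`) handler example from the original docs follows.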
```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### Mapping Function -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## Start Blocks -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. ```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Indexer Hints -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. 
+> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. History as of a given block is required for: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block If historical data as of the block has been pruned, the above capabilities will not be available. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your Subgraph's settings: To retain a specific amount of historical data: @@ -532,3 +532,17 @@ To preserve the complete history of entity states: indexerHints: prune: never ``` + +## SpecVersion Releases + +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From 32057fd726630f4de053ea9b7393c8fdf8577303 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:27 -0500 Subject: [PATCH 0803/1789] New translations subgraph-manifest.mdx (French) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/fr/subgraphs/developing/creating/subgraph-manifest.mdx index f3b29bd0de75..b4aa7e5f5ac9 100644 --- a/website/src/pages/fr/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/fr/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Manifeste de Subgraph ## Aperçu -Le manifeste du subgraph, `subgraph.yaml`, définit les contrats intelligents et le réseau que votre subgraph va indexer, les événements de ces contrats auxquels il faut prêter attention, et comment faire correspondre les données d'événements aux entités que Graph Node stocke et permet d'interroger. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -La **définition du subgraph** se compose des fichiers suivants : +The **Subgraph definition** consists of the following files: -- `subgraph.yaml` : Contient le manifeste du subgraph +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql` : Un schéma GraphQL définissant les données stockées pour votre subgraph et comment les interroger via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts` : [Mappage AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code qui traduit les données d'événements en entités définies dans votre schéma (par exemple `mapping.ts` dans ce guide) ### Capacités des subgraphs -Un seul subgraph peut : +A single Subgraph can: - Indexer les données de plusieurs contrats intelligents (mais pas de plusieurs réseaux). @@ -24,9 +24,9 @@ Un seul subgraph peut : - Ajouter une entrée pour chaque contrat nécessitant une indexation dans le tableau `dataSources`. -La spécification complète des manifestes de subgraphs est disponible [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -Pour l'exemple de subgraph cité ci-dessus, `subgraph.yaml` est : +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml version spec : 0.0.4 @@ -79,47 +79,47 @@ les sources de données: ## Entrées de subgraphs -> Remarque importante : veillez à remplir le manifeste de votre subgraph avec tous les gestionnaires et [entités](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). Les entrées importantes à mettre à jour pour le manifeste sont : -- `specVersion` : une version de semver qui identifie la structure du manifeste et les fonctionnalités supportées pour le subgraph. La dernière version est `1.2.0`. Voir la section [versions de specVersion](#specversion-releases) pour plus de détails sur les fonctionnalités et les versions. 
+- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. -- `description` : une description lisible par l'homme de ce qu'est le subgraph. Cette description est affichée dans Graph Explorer lorsque le subgraph est déployé dans Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository` : l'URL du dépôt où le manifeste du subgraph peut être trouvé. Cette URL est également affichée dans Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features` : une liste de tous les noms de [fonctionnalités](#experimental-features) utilisés. -- `indexerHints.prune` : Définit la conservation des données de blocs historiques pour un subgraph. Voir [prune](#prune) dans la section [indexerHints](#indexer-hints). +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source` : l'adresse du contrat intelligent dont le subgraph est issu, et l'ABI du contrat intelligent à utiliser. L'adresse est optionnelle ; l'omettre permet d'indexer les événements correspondants de tous les contrats. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock` : le numéro optionnel du bloc à partir duquel la source de données commence l'indexation. Dans la plupart des cas, nous suggérons d'utiliser le bloc dans lequel le contrat a été créé. - `dataSources.source.endBlock` : Le numéro optionnel du bloc sur lequel la source de données arrête l'indexation, y compris ce bloc. Version minimale de la spécification requise : `0.0.9`. -- `dataSources.context` : paires clé-valeur qui peuvent être utilisées dans les mappages de subgraphs. Supporte différents types de données comme `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, et `BigInt`. Chaque variable doit spécifier son `type` et ses `données`. Ces variables de contexte sont ensuite accessibles dans les fichiers de mappage, offrant plus d'options configurables pour le développement de subgraphs. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities` : les entités que la source de données écrit dans le store. Le schéma de chaque entité est défini dans le fichier schema.graphql. - `dataSources.mapping.abis` : un ou plusieurs fichiers ABI nommés pour le contrat source ainsi que pour tous les autres contrats intelligents avec lesquels vous interagissez à partir des mappages. 
-- `dataSources.mapping.eventHandlers` : liste les événements du contrat intelligent auxquels ce subgraph réagit et les gestionnaires dans le mappage - ./src/mapping.ts dans l'exemple - qui transforment ces événements en entités dans le store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers` : liste les fonctions de contrat intelligent auxquelles ce subgraph réagit et les handlers dans le mappage qui transforment les entrées et sorties des appels de fonction en entités dans le store. +- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers` : liste les blocs auxquels ce subgraph réagit et les gestionnaires du mappage à exécuter lorsqu'un bloc est ajouté à la blockchain. Sans filtre, le gestionnaire de bloc sera exécuté à chaque bloc. Un filtre d'appel optionnel peut être fourni en ajoutant un champ `filter` avec `kind : call` au gestionnaire. Ceci ne lancera le gestionnaire que si le bloc contient au moins un appel au contrat de la source de données. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -Un seul subgraph peut indexer des données provenant de plusieurs contrats intelligents. Ajoutez une entrée pour chaque contrat dont les données doivent être indexées dans le tableau `dataSources`. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Gestionnaires d'événements -Les gestionnaires d'événements dans un subgraph réagissent à des événements spécifiques émis par des contrats intelligents sur la blockchain et déclenchent des gestionnaires définis dans le manifeste du subgraph. Ceci permet aux subgraphs de traiter et de stocker les données des événements selon une logique définie. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Définition d'un gestionnaire d'événements -Un gestionnaire d'événements est déclaré dans une source de données dans la configuration YAML du subgraph. Il spécifie quels événements écouter et la fonction correspondante à exécuter lorsque ces événements sont détectés. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## Gestionnaires d'appels -Si les événements constituent un moyen efficace de collecter les modifications pertinentes de l'état d'un contrat, de nombreux contrats évitent de générer des logs afin d'optimiser les coûts de gaz. 
Dans ce cas, un subgraph peut s'abonner aux appels faits au contrat de source de données. Pour ce faire, il suffit de définir des gestionnaires d'appels faisant référence à la signature de la fonction et au gestionnaire de mappage qui traitera les appels à cette fonction. Pour traiter ces appels, le gestionnaire de mappage recevra un `ethereum.Call` comme argument avec les entrées et sorties typées de l'appel. Les appels effectués à n'importe quel niveau de la blockchain d'appels d'une transaction déclencheront le mappage, ce qui permettra de capturer l'activité avec le contrat de source de données par le biais de contrats proxy. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. Les gestionnaires d'appels ne se déclencheront que dans l'un des deux cas suivants : lorsque la fonction spécifiée est appelée par un compte autre que le contrat lui-même ou lorsqu'elle est marquée comme externe dans Solidity et appelée dans le cadre d'une autre fonction du même contrat. -> **Note:** Les gestionnaires d'appels dépendent actuellement de l'API de traçage de Parité. Certains réseaux, tels que BNB chain et Arbitrum, ne supportent pas cette API. Si un subgraph indexant l'un de ces réseaux contient un ou plusieurs gestionnaires d'appels, il ne commencera pas à se synchroniser. Les développeurs de subgraphs devraient plutôt utiliser des gestionnaires d'événements. Ceux-ci sont bien plus performants que les gestionnaires d'appels et sont pris en charge par tous les réseaux evm. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### Définir un gestionnaire d'appels @@ -186,7 +186,7 @@ La propriété `function` est la signature de la fonction normalisée pour filtr ### Fonction de cartographie -Chaque gestionnaire d'appel prend un seul paramètre qui a un type correspondant au nom de la fonction appelée. Dans l'exemple du subgraph ci-dessus, le mapping contient un gestionnaire d'appel lorsque la fonction `createGravatar` est appelée et reçoit un paramètre `CreateGravatarCall` en tant qu'argument : +Each call handler takes a single parameter that has a type corresponding to the name of the called function. 
In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,7 +205,7 @@ La fonction `handleCreateGravatar` prend un nouveau `CreateGravatarCall` qui est ## Block Handlers -En plus de s'abonner à des événements de contrat ou à des appels de fonction, un subgraph peut souhaiter mettre à jour ses données à mesure que de nouveaux blocs sont ajoutés à la chaîne. Pour y parvenir, un subgraph peut exécuter une fonction après chaque bloc ou après des blocs correspondant à un filtre prédéfini. +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### Filtres pris en charge @@ -218,7 +218,7 @@ filter: _Le gestionnaire défini sera appelé une fois pour chaque bloc qui contient un appel au contrat (source de données) sous lequel le gestionnaire est défini._ -> **Note:** Le filtre `call` dépend actuellement de l'API de traçage de Parité. Certains réseaux, tels que BNB chain et Arbitrum, ne supportent pas cette API. Si un subgraph indexant un de ces réseaux contient un ou plusieurs gestionnaire de bloc avec un filtre `call`, il ne commencera pas à se synchroniser. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. L'absence de filtre pour un gestionnaire de bloc garantira que le gestionnaire est appelé à chaque bloc. Une source de données ne peut contenir qu'un seul gestionnaire de bloc pour chaque type de filtre. @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -Le gestionnaire défini sera appelé une fois tous les `n` blocs, où `n` est la valeur fournie dans le champ `every`. Cette configuration permet au subgraph d'effectuer des opérations spécifiques à intervalles réguliers. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### Le filtre Once @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -Le gestionnaire défini avec le filtre once ne sera appelé qu'une seule fois avant l'exécution de tous les autres gestionnaires. Cette configuration permet au subgraph d'utiliser le gestionnaire comme gestionnaire d'initialisation, effectuant des tâches spécifiques au début de l'indexation. +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### Fonction de cartographie -La fonction de mappage recevra une `ethereum.Block` comme seul argument. Comme les fonctions de mappage pour les événements, cette fonction peut accéder aux entités de subgraphs existantes dans le store, appeler des contrats intelligents et créer ou mettre à jour des entités. 
+The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ Un événement ne sera déclenché que si la signature et le sujet 0 corresponde A partir de `specVersion` `0.0.5` et `apiVersion` `0.0.7`, les gestionnaires d'événements peuvent avoir accès au reçu de la transaction qui les a émis. -Pour ce faire, les gestionnaires d'événements doivent être déclarés dans le manifeste du subgraph avec la nouvelle clé `receipt : true`, qui est facultative et prend par défaut la valeur false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ Il existe des setters et getters comme `setString` et `getString` pour tous les ## Blocs de démarrage -Le `startBlock` est un paramètre optionnel qui vous permet de définir à partir de quel bloc de la chaîne la source de données commencera l'indexation. Définir le bloc de départ permet à la source de données de sauter potentiellement des millions de blocs qui ne sont pas pertinents. En règle générale, un développeur de subgraphs définira `startBlock` au bloc dans lequel le contrat intelligent de la source de données a été créé. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. ```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Conseils pour l'indexeur -Le paramètre `indexerHints` dans le manifeste d'un subgraph fournit des directives aux Indexeurs sur le traitement et la gestion d'un subgraph. Il influence les décisions opérationnelles concernant le traitement des données, les stratégies d'indexation et les optimisations. Actuellement, il propose l'option `prune` pour gérer la rétention ou suppression des données historiques. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > Cette fonctionnalité est disponible à partir de `specVersion : 1.0.0` ### Prune -`indexerHints.prune` : Définit la rétention des données de blocs historiques pour un subgraph. Les options sont les suivantes : +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: Aucune suppression des données historiques ; conserve l'ensemble de l'historique. 2. `"auto"`: Conserve l'historique minimum nécessaire tel que défini par l'Indexeur, optimisant ainsi les performances de la requête. @@ -505,19 +505,19 @@ Le paramètre `indexerHints` dans le manifeste d'un subgraph fournit des directi prune: auto ``` -> Le terme "historique" dans ce contexte des subgraphs concerne le stockage des données qui reflètent les anciens états des entités mutables. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. 
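In addition to `"never"` and `"auto"`, the `prune` option accepts a specific number of recent blocks to retain, as discussed further below. A minimal sketch, using an arbitrary placeholder value rather than a recommendation:

```yaml
indexerHints:
  prune: 100000 # keep entity history for roughly the most recent 100,000 blocks (placeholder value)
```

Larger values keep more history available for time travel queries, at the cost of the query-performance benefits that pruning provides.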
L'historique à partir d'un bloc donné est requis pour : -- Les [requêtes chronologiques](/subgraphs/querying/graphql-api/#time-travel-queries), qui permettent d'interroger les états passés de ces entités à des moments précis de l'histoire du subgraph -- Utilisation du subgraph comme [base de greffage](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) dans un autre subgraph, à ce bloc -- Rembobiner le subgraph jusqu'à ce bloc +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block Si les données historiques à partir du bloc ont été purgées, les capacités ci-dessus ne seront pas disponibles. > L'utilisation de `"auto"` est généralement recommandée car elle maximise les performances des requêtes et est suffisante pour la plupart des utilisateurs qui n'ont pas besoin d'accéder à des données historiques étendues. -Pour les subgraphs exploitant les [requêtes chronologiques](/subgraphs/querying/graphql-api/#time-travel-queries), il est conseillé de définir un nombre spécifique de blocs pour la conservation des données historiques ou d'utiliser `prune: never` pour conserver tous les états d'entité historiques. Vous trouverez ci-dessous des exemples de configuration des deux options dans les paramètres de votre subgraphs : +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your Subgraph's settings: Pour conserver une quantité spécifique de données historiques : @@ -532,3 +532,17 @@ Préserver l'histoire complète des États de l'entité : indexerHints: prune: never ``` + +## SpecVersion Releases + +| Version | Notes de version | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From 22f8d0510ee9abe17d700bf31aad596ed25a0fb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:28 -0500 Subject: [PATCH 0804/1789] New translations subgraph-manifest.mdx (Spanish) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/es/subgraphs/developing/creating/subgraph-manifest.mdx index c825906fef29..3a3af097c31f 100644 --- a/website/src/pages/es/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/es/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## Descripción -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -A single subgraph can: +A single Subgraph can: - Index data from multiple smart contracts (but not multiple networks). @@ -24,9 +24,9 @@ A single subgraph can: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). Las entradas importantes a actualizar para el manifiesto son: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. 
-- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. 
+- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Event Handlers -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Defining an Event Handler -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## Call Handlers -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. 
To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured.

Los call handlers solo se activarán en uno de estos dos casos: cuando la función especificada sea llamada por una cuenta distinta del propio contrato o cuando esté marcada como externa en Solidity y sea llamada como parte de otra función en el mismo contrato.

-> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network.
+> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, do not support this API. If a Subgraph indexing one of these networks contains one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every EVM network.

### Definición de un Call Handler

@@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han

### Función mapeo

-Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument:
+Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument:

```typescript
import { CreateGravatarCall } from '../generated/Gravity/Gravity'
@@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a

## Handlers de bloques

-Además de suscribirse a eventos del contracto o calls de funciones, un subgrafo puede querer actualizar sus datos a medida que se añaden nuevos bloques a la cadena. Para ello, un subgrafo puede ejecutar una función después de cada bloque o después de los bloques que coincidan con un filtro predefinido.
+In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this, a Subgraph can run a function after every block or after blocks that match a pre-defined filter.

### Filtros admitidos

@@ -218,7 +218,7 @@ filter:

_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._

-> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing.
+> **Note:** The `call` filter currently depends on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, do not support this API.
If a Subgraph indexing one of these networks contains one or more block handlers with a `call` filter, it will not start syncing.

La ausencia de un filtro para un handler de bloque asegurará que el handler sea llamado en cada bloque. Una fuente de datos solo puede contener un handler de bloque para cada tipo de filtro.

@@ -261,7 +261,7 @@ blockHandlers:
      every: 10
```

-The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals.
+The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals.

#### Once Filter

@@ -276,7 +276,7 @@ blockHandlers:
      kind: once
```

-The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing.
+The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing.

```ts
export function handleOnce(block: ethereum.Block): void {
@@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void {

### Función mapeo

-The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities.
+The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities.

```typescript
import { ethereum } from '@graphprotocol/graph-ts'
@@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de

Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them.

-To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false.
+To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false.

```yaml
eventHandlers:
@@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ

## Bloques iniciales

-The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created.
+The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created.
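The `dataSources.source.endBlock` field described earlier (minimum spec version `0.0.9`) can be combined with `startBlock` to index a fixed, bounded range of blocks. Below is a minimal sketch; the name, address, and block numbers are placeholders, and the `mapping` section is omitted for brevity. The original `startBlock` example from the docs appears right after it.

```yaml
dataSources:
  - kind: ethereum/contract
    name: ExampleBoundedSource # placeholder name
    network: mainnet
    source:
      address: '0x0000000000000000000000000000000000000000' # placeholder address
      abi: ExampleContract
      startBlock: 10000000 # placeholder: block in which the contract was created
      endBlock: 10500000 # placeholder: last block to index, inclusive (requires specVersion >= 0.0.9)
```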
```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Indexer Hints -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. History as of a given block is required for: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block If historical data as of the block has been pruned, the above capabilities will not be available. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your Subgraph's settings: To retain a specific amount of historical data: @@ -532,3 +532,17 @@ To preserve the complete history of entity states: indexerHints: prune: never ``` + +## SpecVersion Releases + +| Version | Notas del lanzamiento | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 05428d7a6722b08262d0294103cc2026a3957f47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:29 -0500 Subject: [PATCH 0805/1789] New translations subgraph-manifest.mdx (Arabic) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/ar/subgraphs/developing/creating/subgraph-manifest.mdx index ba893838ca4e..7565b23ec958 100644 --- a/website/src/pages/ar/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/ar/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## نظره عامة -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -A single subgraph can: +A single Subgraph can: - Index data from multiple smart contracts (but not multiple networks). 
@@ -24,9 +24,9 @@ A single subgraph can: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). الإدخالات الهامة لتحديث manifest هي: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. 
These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. +- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Event Handlers -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Defining an Event Handler -An event handler is declared within a data source in the subgraph's YAML configuration. 
It specifies which events to listen for and the corresponding function to execute when those events are detected. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## معالجات الاستدعاء(Call Handlers) -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### تعريف معالج الاستدعاء @@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han ### دالة الـ Mapping -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. 
In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## معالجات الكتلة -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### الفلاتر المدعومة @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### Once Filter @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### دالة الـ Mapping -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. 
```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## كتل البدء -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. ```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Indexer Hints -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. 
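For orientation, here is a minimal sketch of how these settings might sit together near the top of a manifest — assuming the usual top-level layout, with `specVersion: 1.0.0` (the minimum version for this feature) alongside `indexerHints`:

```yaml
specVersion: 1.0.0 # indexerHints requires specVersion 1.0.0 or later
indexerHints:
  prune: auto # retain only the minimum history the indexer needs for queries
```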
History as of a given block is required for: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block If historical data as of the block has been pruned, the above capabilities will not be available. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your Subgraph's settings: To retain a specific amount of historical data: @@ -532,3 +532,17 @@ To preserve the complete history of entity states: indexerHints: prune: never ``` + +## SpecVersion Releases + +| الاصدار | ملاحظات الإصدار | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From c97eaf5ee59dc4000902d239f2a683c319cd58b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:30 -0500 Subject: [PATCH 0806/1789] New translations subgraph-manifest.mdx (Czech) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/cs/subgraphs/developing/creating/subgraph-manifest.mdx index a434110b4282..b01cdb47b52f 100644 --- a/website/src/pages/cs/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/cs/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## Přehled -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -A single subgraph can: +A single Subgraph can: - Index data from multiple smart contracts (but not multiple networks). @@ -24,9 +24,9 @@ A single subgraph can: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). Důležité položky, které je třeba v manifestu aktualizovat, jsou: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. 
-- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. 
+- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Event Handlers -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Defining an Event Handler -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## Zpracovatelé hovorů -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. 
To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. Obsluhy volání se spustí pouze v jednom ze dvou případů: když je zadaná funkce volána jiným účtem než samotnou smlouvou nebo když je v Solidity označena jako externí a volána jako součást jiné funkce ve stejné smlouvě. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### Definice obsluhy volání @@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han ### Funkce mapování -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## Obsluha bloků -Kromě přihlášení k událostem smlouvy nebo volání funkcí může podgraf chtít aktualizovat svá data, když jsou do řetězce přidány nové bloky. Za tímto účelem může podgraf spustit funkci po každém bloku nebo po blocích, které odpovídají předem definovanému filtru. +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### Podporované filtry @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. 
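As a rough sketch of the shape such a handler takes in the manifest — the handler name here is only an assumed example — a block handler restricted by a call filter would look like:

```yaml
blockHandlers:
  - handler: handleBlockWithCallToContract # assumed handler name
    filter:
      kind: call # run only for blocks containing at least one call to this data source's contract
```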
Protože pro obsluhu bloku neexistuje žádný filtr, zajistí, že obsluha bude volána každý blok. Zdroj dat může obsahovat pouze jednu blokovou obsluhu pro každý typ filtru. @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### Jednou Filtr @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -Definovaný obslužná rutina s filtrem once bude zavolána pouze jednou před spuštěním všech ostatních rutin. Tato konfigurace umožňuje, aby podgraf používal obslužný program jako inicializační obslužný, který provádí specifické úlohy na začátku indexování. +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### Funkce mapování -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## Výchozí bloky -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. ```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Tipy indexátor -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. 
It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prořezávat -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. History as of a given block is required for: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block If historical data as of the block has been pruned, the above capabilities will not be available. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your Subgraph's settings: Uchování určitého množství historických dat: @@ -532,3 +532,17 @@ Zachování kompletní historie entitních států: indexerHints: prune: never ``` + +## SpecVersion Releases + +| Verze | Poznámky vydání | +| :---: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From caeeb95efc50cf8088b2270a1fddbae813b8800b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:31 -0500 Subject: [PATCH 0807/1789] New translations subgraph-manifest.mdx (German) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/de/subgraphs/developing/creating/subgraph-manifest.mdx index a3959f1f4d57..4b7059b04a6e 100644 --- a/website/src/pages/de/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## Überblick -Das Subgraph-Manifest, `subgraph.yaml`, definiert die Smart Contracts und das Netzwerk, die Ihr Subgraph indizieren wird, die Ereignisse aus diesen Verträgen, auf die geachtet werden soll, und wie die Ereignisdaten auf Entitäten abgebildet werden, die Graph Node speichert und abfragen kann. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -Die **Subgraph-Definition** besteht aus den folgenden Dateien: +The **Subgraph definition** consists of the following files: -- subgraph.yaml": Enthält das Manifest des Subgraphen +- `subgraph.yaml`: Contains the Subgraph manifest -- schema.graphql": Ein GraphQL-Schema, das die für Ihren Subgraph gespeicherten Daten definiert und festlegt, wie sie über GraphQL abgefragt werden können +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph-Fähigkeiten -A single subgraph can: +A single Subgraph can: - Index data from multiple smart contracts (but not multiple networks). 
@@ -24,9 +24,9 @@ A single subgraph can: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). The important entries to update for the manifest are: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. 
These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. +- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Event Handlers -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Defining an Event Handler -An event handler is declared within a data source in the subgraph's YAML configuration. 
It specifies which events to listen for and the corresponding function to execute when those events are detected. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## Call Handlers -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### Defining a Call Handler @@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han ### Mapping Function -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. 
In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## Block Handlers -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### Supported Filters @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### Once Filter @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### Mapping Function -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. 
```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## Start Blocks -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. ```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Indexer Hints -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. 
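As a minimal sketch of how the settings discussed above sit together in one manifest — the address, start block, event signature, and handler name below are assumed placeholders modeled on the Gravatar example, not values confirmed by this patch — a pruned data source might look roughly like:

```yaml
specVersion: 1.0.0
description: Gravatar for Ethereum (illustrative sketch only)
schema:
  file: ./schema.graphql
# indexerHints is a top-level key, so it applies to the whole Subgraph
indexerHints:
  prune: auto
dataSources:
  - kind: ethereum/contract
    name: Gravity
    network: mainnet
    source:
      # Placeholder address and start block — substitute the real deployment values
      address: '0x0000000000000000000000000000000000000000'
      abi: Gravity
      startBlock: 6175244
    mapping:
      kind: ethereum/events
      apiVersion: 0.0.7
      language: wasm/assemblyscript
      entities:
        - Gravatar
      abis:
        - name: Gravity
          file: ./abis/Gravity.json
      eventHandlers:
        # Assumed event signature and handler name, following the Gravatar example
        - event: NewGravatar(uint256,address,string,string)
          handler: handleNewGravatar
      file: ./src/mapping.ts
```

Because `indexerHints` sits at the top level rather than under a data source, the chosen `prune` policy governs history retention for the Subgraph as a whole.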
History as of a given block is required for: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block If historical data as of the block has been pruned, the above capabilities will not be available. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your Subgraph's settings: To retain a specific amount of historical data: @@ -532,3 +532,17 @@ To preserve the complete history of entity states: indexerHints: prune: never ``` + +## SpecVersion Releases + +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From 94c69d1a848578b9ebdf65a62960c4144662b614 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:32 -0500 Subject: [PATCH 0808/1789] New translations subgraph-manifest.mdx (Italian) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/it/subgraphs/developing/creating/subgraph-manifest.mdx index d8b9c415b293..59d17716b5ba 100644 --- a/website/src/pages/it/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/it/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## Panoramica -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -A single subgraph can: +A single Subgraph can: - Index data from multiple smart contracts (but not multiple networks). @@ -24,9 +24,9 @@ A single subgraph can: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). Le voci importanti da aggiornare per il manifesto sono: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. 
-- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. 
+- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Event Handlers -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Defining an Event Handler -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## Gestori di chiamate -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. 
To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. I gestori di chiamate si attivano solo in uno dei due casi: quando la funzione specificata viene chiamata da un conto diverso dal contratto stesso o quando è contrassegnata come esterna in Solidity e chiamata come parte di un'altra funzione nello stesso contratto. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### Definire un gestore di chiamate @@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han ### Funzione di mappatura -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## Gestori di blocchi -Oltre a sottoscrivere eventi di contratto o chiamate di funzione, un subgraph può voler aggiornare i propri dati quando nuovi blocchi vengono aggiunti alla chain. A tale scopo, un subgraph può eseguire una funzione dopo ogni blocco o dopo i blocchi che corrispondono a un filtro predefinito. +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### Filtri supportati @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. 
If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. L'assenza di un filtro per un gestore di blocchi garantisce che il gestore venga chiamato a ogni blocco. Una data source può contenere un solo gestore di blocchi per ogni tipo di filtro. @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### Filtro once @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -Il gestore definito con il filtro once sarà chiamato una sola volta prima dell'esecuzione di tutti gli altri gestori. Questa configurazione consente al subgraph di utilizzare il gestore come gestore di inizializzazione, eseguendo compiti specifici all'inizio dell'indicizzazione. +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### Funzione di mappatura -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## Blocchi di partenza -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. 
```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Indexer Hints -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. History as of a given block is required for: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block If historical data as of the block has been pruned, the above capabilities will not be available. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your Subgraph's settings: To retain a specific amount of historical data: @@ -532,3 +532,17 @@ To preserve the complete history of entity states: indexerHints: prune: never ``` + +## SpecVersion Releases + +| Versione | Note di rilascio | +| :------: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 1f353c8f9cee8a1d0cae512d49a242399c3d304b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:33 -0500 Subject: [PATCH 0809/1789] New translations subgraph-manifest.mdx (Japanese) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/ja/subgraphs/developing/creating/subgraph-manifest.mdx index 1fc82b54930d..e34caa492832 100644 --- a/website/src/pages/ja/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/ja/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## 概要 -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -A single subgraph can: +A single Subgraph can: - Index data from multiple smart contracts (but not multiple networks). 
@@ -24,9 +24,9 @@ A single subgraph can: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). マニフェストを更新する重要な項目は以下の通りです: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. 
These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. +- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Event Handlers -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Defining an Event Handler -An event handler is declared within a data source in the subgraph's YAML configuration. 
It specifies which events to listen for and the corresponding function to execute when those events are detected. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## コールハンドラー -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. コールハンドラーは、次の 2 つのケースのいずれかでのみトリガされます:指定された関数がコントラクト自身以外のアカウントから呼び出された場合、または Solidity で外部としてマークされ、同じコントラクト内の別の関数の一部として呼び出された場合。 -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### コールハンドラーの定義 @@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han ### マッピング関数 -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. 
In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## ブロック・ハンドラー -コントラクトイベントやファンクションコールの購読に加えて、サブグラフは、新しいブロックがチェーンに追加されると、そのデータを更新したい場合があります。これを実現するために、サブグラフは各ブロックの後、あるいは事前に定義されたフィルタにマッチしたブロックの後に、関数を実行することができます。 +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### 対応フィルター @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. ブロックハンドラーにフィルターがない場合、ハンドラーはブロックごとに呼び出されます。1 つのデータソースには、各フィルタータイプに対して 1 つのブロックハンドラーしか含めることができません。 @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### ワンスフィルター @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -Once フィルターを使用して定義されたハンドラーは、他のすべてのハンドラーが実行される前に 1 回だけ呼び出されます。 この構成により、サブグラフはハンドラーを初期化ハンドラーとして使用し、インデックス作成の開始時に特定のタスクを実行できるようになります。 +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### マッピング関数 -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. 
-To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## スタートブロック(start Blocks) -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. ```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Indexer Hints -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. 
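For instance — as an illustrative sketch only, where `100000` is an assumed placeholder for however many blocks of history are actually needed — retention can be pinned to a fixed window of recent blocks:

```yaml
indexerHints:
  # Keep roughly the most recent 100000 blocks of entity history (placeholder value)
  prune: 100000
```

Anything older than that window is treated as pruned, with the consequences described next.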
History as of a given block is required for: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block If historical data as of the block has been pruned, the above capabilities will not be available. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your Subgraph's settings: To retain a specific amount of historical data: @@ -532,3 +532,17 @@ To preserve the complete history of entity states: indexerHints: prune: never ``` + +## SpecVersion Releases + +| バージョン | リリースノート | +| :---: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From cd16ed4b00ae4e6f956ae18c2a125e61d41a1fd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:34 -0500 Subject: [PATCH 0810/1789] New translations subgraph-manifest.mdx (Korean) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/ko/subgraphs/developing/creating/subgraph-manifest.mdx index a42a50973690..428ff5332baf 100644 --- a/website/src/pages/ko/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/ko/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## Overview -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -A single subgraph can: +A single Subgraph can: - Index data from multiple smart contracts (but not multiple networks). @@ -24,9 +24,9 @@ A single subgraph can: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). The important entries to update for the manifest are: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. 
-- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. 
+- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Event Handlers -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Defining an Event Handler -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## Call Handlers -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. 
To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### Defining a Call Handler @@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han ### Mapping Function -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## Block Handlers -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### Supported Filters @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. 
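For reference, a block handler limited to blocks that contain a call to the data source contract is declared by attaching the `call` filter described above to the handler entry. A minimal sketch, with a hypothetical handler name:

```yaml
blockHandlers:
  # Runs only for blocks containing at least one call to the data source contract
  - handler: handleBlockWithCall
    filter:
      kind: call
```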
The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### Once Filter @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### Mapping Function -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## Start Blocks -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. ```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Indexer Hints -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. 
It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. History as of a given block is required for: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block If historical data as of the block has been pruned, the above capabilities will not be available. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your Subgraph's settings: To retain a specific amount of historical data: @@ -532,3 +532,17 @@ To preserve the complete history of entity states: indexerHints: prune: never ``` + +## SpecVersion Releases + +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 10289c9f01e7f75a9ba1f36b806f9a236514ba47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:35 -0500 Subject: [PATCH 0811/1789] New translations subgraph-manifest.mdx (Dutch) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/nl/subgraphs/developing/creating/subgraph-manifest.mdx index a42a50973690..428ff5332baf 100644 --- a/website/src/pages/nl/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/nl/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## Overview -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -A single subgraph can: +A single Subgraph can: - Index data from multiple smart contracts (but not multiple networks). 
@@ -24,9 +24,9 @@ A single subgraph can: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). The important entries to update for the manifest are: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. 
These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. +- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Event Handlers -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Defining an Event Handler -An event handler is declared within a data source in the subgraph's YAML configuration. 
It specifies which events to listen for and the corresponding function to execute when those events are detected. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## Call Handlers -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### Defining a Call Handler @@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han ### Mapping Function -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. 
In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## Block Handlers -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### Supported Filters @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### Once Filter @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### Mapping Function -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. 
```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## Start Blocks -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. ```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Indexer Hints -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. 
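As a rough sketch, a fixed retention window is expressed by giving `prune` a specific block count rather than `auto` or `never` (the number below is an arbitrary placeholder; fuller configuration examples follow further down):

```yaml
indexerHints:
  # Keep history for roughly the most recent 100000 blocks (placeholder value)
  prune: 100000
```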
History as of a given block is required for: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block If historical data as of the block has been pruned, the above capabilities will not be available. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your Subgraph's settings: To retain a specific amount of historical data: @@ -532,3 +532,17 @@ To preserve the complete history of entity states: indexerHints: prune: never ``` + +## SpecVersion Releases + +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From f9c411cde4919453d9103f590821304843249e49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:36 -0500 Subject: [PATCH 0812/1789] New translations subgraph-manifest.mdx (Polish) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/pl/subgraphs/developing/creating/subgraph-manifest.mdx index a42a50973690..428ff5332baf 100644 --- a/website/src/pages/pl/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/pl/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## Overview -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -A single subgraph can: +A single Subgraph can: - Index data from multiple smart contracts (but not multiple networks). @@ -24,9 +24,9 @@ A single subgraph can: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). The important entries to update for the manifest are: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. 
-- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. 
+- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Event Handlers -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Defining an Event Handler -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## Call Handlers -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. 
To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### Defining a Call Handler @@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han ### Mapping Function -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## Block Handlers -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### Supported Filters @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. 
The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### Once Filter @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### Mapping Function -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## Start Blocks -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. ```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Indexer Hints -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. 
It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. History as of a given block is required for: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block If historical data as of the block has been pruned, the above capabilities will not be available. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your Subgraph's settings: To retain a specific amount of historical data: @@ -532,3 +532,17 @@ To preserve the complete history of entity states: indexerHints: prune: never ``` + +## SpecVersion Releases + +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 039390f5b2a56864b9db3ef3b2cb3f99954653ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:38 -0500 Subject: [PATCH 0813/1789] New translations subgraph-manifest.mdx (Portuguese) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/pt/subgraphs/developing/creating/subgraph-manifest.mdx index 2a4c3af44fe4..127a7a1a6baf 100644 --- a/website/src/pages/pt/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/pt/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## Visão geral -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -A single subgraph can: +A single Subgraph can: - Index data from multiple smart contracts (but not multiple networks). 
@@ -24,9 +24,9 @@ A single subgraph can: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). As entradas importantes para atualizar para o manifest são: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. 
These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. +- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Handlers de Eventos -Handlers de eventos em um subgraph reagem a eventos específicos emitidos por contratos inteligentes na blockchain e acionam handlers definidos no manifest do subgraph. Isto permite que subgraphs processem e armazenem dados conforme a lógica definida. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Como Definir um Handler de Evento -Um handler de evento é declarado dentro de uma fonte de dados na configuração YAML do subgraph. 
Ele especifica quais eventos devem ser escutados e a função correspondente a ser executada quando estes eventos forem detetados. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## Handlers de chamada -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. Handlers de chamadas só serão ativados em um de dois casos: quando a função especificada é chamada por uma conta que não for do próprio contrato, ou quando ela é marcada como externa no Solidity e chamada como parte de outra função no mesmo contrato. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### Como Definir um Handler de Chamada @@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han ### Função de Mapeamento -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. 
In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## Handlers de Blocos -Além de se inscrever a eventos de contratos ou chamadas para funções, um subgraph também pode querer atualizar os seus dados enquanto novos blocos são afixados à chain. Para isto, um subgraph pode executar uma função após cada bloco, ou após blocos que correspondem a um filtro predefinido. +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### Filtros Apoiados @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. A ausência de um filtro para um handler de blocos garantirá que o handler seja chamado a todos os blocos. Uma fonte de dados só pode conter um handler de bloco para cada tipo de filtro. @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### Filtro Once @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -O handler definido com o filtro once só será chamado uma única vez antes da execução de todos os outros handlers (por isto, o nome "once" / "uma vez"). Esta configuração permite que o subgraph use o handler como um handler de inicialização, para realizar tarefas específicas no começo da indexação. +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### Função de Mapeamento -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. 
Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## Blocos Iniciais -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. ```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## IndexerHints -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> O termo "histórico", neste contexto de subgraphs, refere-se ao armazenamento de dados que refletem os estados antigos de entidades mutáveis. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. 
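Besides `"never"` and `"auto"`, `prune` also accepts a specific number of blocks of history to retain, as the surrounding prose notes. A minimal sketch, where the count is an arbitrary placeholder rather than a recommended value:

```yaml
indexerHints:
  prune: 100000 # keep roughly the last 100000 blocks of entity history (placeholder value)
```

Entity states older than that window are then unavailable for time travel queries, grafting, or rewinds.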
O histórico, desde um bloco especificado, é necessário para: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rebobinar o subgraph de volta àquele bloco +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block Se os dados históricos desde aquele bloco tiverem passado por pruning, as capacidades acima não estarão disponíveis. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your Subgraph's settings: Para reter uma quantidade específica de dados históricos: @@ -532,3 +532,17 @@ Para preservar o histórico completo dos estados da entidade: indexerHints: prune: never ``` + +## SpecVersion Releases + +| Versão | Notas de atualização | +| :----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From 2ee512969807b7a46c0e060ebe0d9409d028b1de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:39 -0500 Subject: [PATCH 0814/1789] New translations subgraph-manifest.mdx (Russian) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/ru/subgraphs/developing/creating/subgraph-manifest.mdx index a8f1a728f47a..c7f8d5ba4538 100644 --- a/website/src/pages/ru/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/ru/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## Обзор -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -Один субграф может: +A single Subgraph can: - Индексировать данные из нескольких смарт-контрактов (но не из нескольких сетей). @@ -24,9 +24,9 @@ The **subgraph definition** consists of the following files: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). Важными элементами манифеста, которые необходимо обновить, являются: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. 
See [specVersion releases](#specversion-releases) section to see more details on features & releases. -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. 
-- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. +- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Обработчики событий -Обработчики событий в субграфе реагируют на конкретные события, генерируемые смарт-контрактами в блокчейне, и запускают обработчики, определенные в манифесте подграфа. Это позволяет субграфам обрабатывать и хранить данные о событиях в соответствии с определенной логикой. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Определение обработчика событий -Обработчик событий объявлен внутри источника данных в конфигурации YAML субграфа. Он определяет, какие события следует прослушивать, и соответствующую функцию, которую необходимо выполнить при обнаружении этих событий. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## Обработчики вызовов -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. 
In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. Обработчики вызовов срабатывают только в одном из двух случаев: когда указанная функция вызывается учетной записью, отличной от самого контракта, или когда она помечена как внешняя в Solidity и вызывается как часть другой функции в том же контракте. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### Определение обработчика вызова @@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han ### Функция мэппинга -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## Обработчики блоков -В дополнение к подписке на события контракта или вызовы функций, субграф может захотеть обновить свои данные по мере добавления в цепочку новых блоков. Чтобы добиться этого, субграф может запускать функцию после каждого блока или после блоков, соответствующих заранее определенному фильтру. +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### Поддерживаемые фильтры @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. 
+> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. Отсутствие фильтра для обработчика блоков гарантирует, что обработчик вызывается для каждого блока. Источник данных может содержать только один обработчик блоков для каждого типа фильтра. @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### Однократный фильтр @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -Определенный обработчик с однократным фильтром будет вызываться только один раз перед запуском всех остальных обработчиков. Эта конфигурация позволяет субграфу использовать обработчик в качестве обработчика инициализации, выполняя определенные задачи в начале индексирования. +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### Функция мэппинга -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## Стартовые блоки -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. 
Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. ```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Подсказки индексатору -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Сокращение -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> Термин «история» в контексте субграфов означает хранение данных, отражающих старые состояния изменяемых объектов. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. История данного блока необходима для: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Отката субграфа обратно к этому блоку +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block Если исторические данные на момент создания блока были удалены, вышеупомянутые возможности будут недоступны. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your Subgraph's settings: Чтобы сохранить определенный объем исторических данных: @@ -532,3 +532,17 @@ For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/# indexerHints: prune: never ``` + +## SpecVersion Releases + +| Версия | Примечания к релизу | +| :----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 36cb7fa809b6892f990fa6288c99fa96cd5e4619 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:40 -0500 Subject: [PATCH 0815/1789] New translations subgraph-manifest.mdx (Swedish) --- .../developing/creating/subgraph-manifest.mdx | 142 ++++++++++-------- 1 file changed, 78 insertions(+), 64 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/sv/subgraphs/developing/creating/subgraph-manifest.mdx index e9bac4f876b1..da9f58d6781a 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## Översikt -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -A single subgraph can: +A single Subgraph can: - Index data from multiple smart contracts (but not multiple networks). 
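The `schema.graphql` file listed above defines the entities that the manifest's data sources write to. A hypothetical minimal version for the `Gravatar` entity referenced in the example manifest might look like the following; the field list is an assumption for illustration, not the canonical example schema:

```graphql
type Gravatar @entity {
  id: ID!
  owner: Bytes!
  displayName: String!
  imageUrl: String!
}
```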
@@ -24,9 +24,9 @@ A single subgraph can: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). De viktiga posterna att uppdatera för manifestet är: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. 
These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. +- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Event Handlers -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Defining an Event Handler -An event handler is declared within a data source in the subgraph's YAML configuration. 
It specifies which events to listen for and the corresponding function to execute when those events are detected. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## Anropsbehandlare -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. Anropsbehandlare utlöses endast i ett av två fall: när den specificerade funktionen anropas av ett konto som inte är kontraktet självt eller när den är markerad som extern i Solidity och anropas som en del av en annan funktion i samma kontrakt. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### Definiera en Anropsbehandlare @@ -165,7 +165,7 @@ dataSources: name: Gravity network: mainnet source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' + address: "0x731a10897d267e19b34503ad902d0a29173ba4b1" abi: Gravity mapping: kind: ethereum/events @@ -186,18 +186,18 @@ The `function` is the normalized function signature to filter calls by. The `han ### Kartläggningsfunktion -Each call handler takes a single parameter that has a type corresponding to the name of the called function. 
In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' +import { CreateGravatarCall } from "../generated/Gravity/Gravity"; +import { Transaction } from "../generated/schema"; export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() + let id = call.transaction.hash; + let transaction = new Transaction(id); + transaction.displayName = call.inputs._displayName; + transaction.imageUrl = call.inputs._imageUrl; + transaction.save(); } ``` @@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## Blockbehandlare -Förutom att prenumerera på kontrakts händelser eller funktionsanrop kan en subgraf vilja uppdatera sina data när nya block läggs till i kedjan. För att uppnå detta kan en subgraf köra en funktion efter varje block eller efter block som matchar en fördefinierad filter. +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### Stödda filter @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. Avsaknaden av ett filter för en blockhanterare kommer att säkerställa att hanteraren kallas för varje block. En datakälla kan endast innehålla en blockhanterare för varje filttyp. @@ -228,7 +228,7 @@ dataSources: name: Gravity network: dev source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' + address: "0x731a10897d267e19b34503ad902d0a29173ba4b1" abi: Gravity mapping: kind: ethereum/events @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. 
#### En Gång Filter @@ -276,27 +276,27 @@ blockHandlers: kind: once ``` -Den definierade hanteraren med filtret once kommer att anropas endast en gång innan alla andra hanterare körs. Denna konfiguration gör det möjligt för subgrafen att använda hanteraren som en initialiseringshanterare, som utför specifika uppgifter i början av indexeringen. +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() + let data = new InitialData(Bytes.fromUTF8("initial")); + data.data = "Setup data here"; + data.save(); } ``` ### Kartläggningsfunktion -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. ```typescript -import { ethereum } from '@graphprotocol/graph-ts' +import { ethereum } from "@graphprotocol/graph-ts"; export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() + let id = block.hash; + let entity = new Block(id); + entity.save(); } ``` @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -356,7 +356,7 @@ dataSources: name: Factory network: mainnet source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' + address: "0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95" abi: Factory mapping: kind: ethereum/events @@ -414,12 +414,12 @@ templates: In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. ```typescript -import { Exchange } from '../generated/templates' +import { Exchange } from "../generated/templates"; export function handleNewExchange(event: NewExchange): void { // Start indexing the exchange; `event.params.exchange` is the // address of the new exchange contract - Exchange.create(event.params.exchange) + Exchange.create(event.params.exchange); } ``` @@ -432,29 +432,29 @@ export function handleNewExchange(event: NewExchange): void { Data source contexts allow passing extra configuration when instantiating a template. In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. 
That information can be passed into the instantiated data source, like so: ```typescript -import { Exchange } from '../generated/templates' +import { Exchange } from "../generated/templates"; export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) + let context = new DataSourceContext(); + context.setString("tradingPair", event.params.tradingPair); + Exchange.createWithContext(event.params.exchange, context); } ``` Inside a mapping of the `Exchange` template, the context can then be accessed: ```typescript -import { dataSource } from '@graphprotocol/graph-ts' +import { dataSource } from "@graphprotocol/graph-ts"; -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') +let context = dataSource.context(); +let tradingPair = context.getString("tradingPair") ``` There are setters and getters like `setString` and `getString` for all value types. ## Startblock -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. ```yaml dataSources: @@ -462,7 +462,7 @@ dataSources: name: ExampleSource network: mainnet source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' + address: "0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95" abi: ExampleContract startBlock: 6627917 mapping: @@ -488,13 +488,13 @@ dataSources: ## Indexer Hints -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. 
+> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. History as of a given block is required for: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block If historical data as of the block has been pruned, the above capabilities will not be available. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your Subgraph's settings: To retain a specific amount of historical data: @@ -532,3 +532,17 @@ To preserve the complete history of entity states: indexerHints: prune: never ``` + +## SpecVersion Releases + +| Version | Versionsanteckningar | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From bfb3e3e50117e2ead055cf7670e09ee8b56ba847 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:41 -0500 Subject: [PATCH 0816/1789] New translations subgraph-manifest.mdx (Turkish) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/tr/subgraphs/developing/creating/subgraph-manifest.mdx index 88693e796ef6..d5b9cfcb276d 100644 --- a/website/src/pages/tr/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/tr/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## Genel Bakış -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -Tek bir subgraph: +A single Subgraph can: - Birden fazla akıllı sözleşmeden veri endeksleyebilir (fakat birden fazla ağdan endeksleyemez). @@ -24,9 +24,9 @@ Tek bir subgraph: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). Manifest için güncellenmesi gereken önemli girdiler şunlardır: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. 
-- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. 
+- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Olay İşleyicileri -Bir subgraph'in olay işleyicileri, blokzincir üzerindeki akıllı sözleşmeler tarafından yayılan belirli olaylara tepki verir, ve subgraph'in manifesto dosyasında tanımlanan işleyicileri tetikler. Bu, subgraph'lerin tanımlanmış mantığa göre olay verilerini işlemesini ve depolamasını sağlar. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Olay İşleyici Tanımlama -Bir olay işleyici, subgraph'in YAML yapılandırmasında bir veri kaynağı içinde tanımlanır. Hangi olayların dinleneceğini ve bu olaylar algılandığında hangi fonksiyonun çalıştırılacağını belirtir. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## Çağrı İşleyicileri -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. 
To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. Çağrı işleyicileri yalnızca iki durumdan birinde tetiklenir: belirtilen işlevin sözleşme tarafından değil, başka bir hesap tarafından çağrılması durumunda veya Solidity'de harici olarak işaretlenip aynı sözleşmenin başka bir işlevinin bir parçası olarak çağrılması durumunda yalnızca tetiklenir. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### Bir Çağrı İşleyici Tanımlama @@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han ### Eşleştirme fonksiyonu -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## Blok İşleyicileri -Bir subgraph, sözleşme olaylarına veya işlev çağrılarına abone olmanın yanı sıra, zincire yeni bloklar eklendikçe verilerini güncellemek isteyebilir. Bu işlemi gerçekleştirmek için a subgraph, her blok sonrasında veya önceden tanımlanmış bir filtreye uygun bloklardan sonra bir işlev çalıştırabilir. +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### Desteklenen Filtreler @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. 
If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. Bir blok işleyicisi için filtre olmaması, işleyicinin her blok için çağrılacağı anlamına gelir. Bir veri kaynağı, her filtre türü için yalnızca bir blok işleyicisi içerebilir. @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### Once Filtresi @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -Once filtresi ile tanımlanan işleyici, diğer tüm işleyiciler çalışmadan önce yalnızca bir kez çağrılacaktır. Bu yapılandırma, subgraph'ın işleyiciyi indekslemenin başlangıcında belirli görevleri yerine getirmesine olanak sağlayan bir başlatma işleyicisi olarak kullanmasına yarar. +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### Eşleştirme fonksiyonu -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## Başlangıç Blokları -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. 
```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Endeksleyici İpuçları -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Budama -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> Subgraph'lerde "geçmiş" terimi, bu bağlamda, değiştirilebilir varlıkların eski durumlarına dair verilerin saklanmasıyla ilgilidir. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. Verilen bir bloktaki geçmiş, şu durumlar için gereklidir: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Subgraph'i verilen bloka geri sarmak +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block Eğer verilen bloktaki tarihsel veri budanmışsa yukarıdaki özellikler kullanılamayacaktır. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your Subgraph's settings: Belirli bir miktarda tarihsel veri saklamak için: @@ -532,3 +532,17 @@ Varlık durumlarının tam geçmişini korumak için: indexerHints: prune: never ``` + +## SpecVersion Releases + +| Sürüm | Sürüm Notları | +| :---: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From f2fc67d800008f94d6d8d956fc1f7092031d73fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:42 -0500 Subject: [PATCH 0817/1789] New translations subgraph-manifest.mdx (Ukrainian) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/uk/subgraphs/developing/creating/subgraph-manifest.mdx index a42a50973690..428ff5332baf 100644 --- a/website/src/pages/uk/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/uk/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## Overview -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -A single subgraph can: +A single Subgraph can: - Index data from multiple smart contracts (but not multiple networks). 
@@ -24,9 +24,9 @@ A single subgraph can: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). The important entries to update for the manifest are: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. 
These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. +- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Event Handlers -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Defining an Event Handler -An event handler is declared within a data source in the subgraph's YAML configuration. 
It specifies which events to listen for and the corresponding function to execute when those events are detected. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## Call Handlers -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### Defining a Call Handler @@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han ### Mapping Function -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. 
In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## Block Handlers -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### Supported Filters @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### Once Filter @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### Mapping Function -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. 
```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## Start Blocks -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. ```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Indexer Hints -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. 
History as of a given block is required for: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block If historical data as of the block has been pruned, the above capabilities will not be available. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your Subgraph's settings: To retain a specific amount of historical data: @@ -532,3 +532,17 @@ To preserve the complete history of entity states: indexerHints: prune: never ``` + +## SpecVersion Releases + +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From 7bcca87d860623a51c59eedab09fbcde7f1ecfd6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:43 -0500 Subject: [PATCH 0818/1789] New translations subgraph-manifest.mdx (Chinese Simplified) --- .../developing/creating/subgraph-manifest.mdx | 92 +++++++++++-------- 1 file changed, 53 insertions(+), 39 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/zh/subgraphs/developing/creating/subgraph-manifest.mdx index 486f06a4c248..26e7cf21153b 100644 --- a/website/src/pages/zh/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/zh/subgraphs/developing/creating/subgraph-manifest.mdx @@ -2,21 +2,21 @@ title: Subgraph Manifest --- -## 概述 +## Overview -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -A single subgraph can: +A single Subgraph can: - Index data from multiple smart contracts (but not multiple networks). @@ -24,9 +24,9 @@ A single subgraph can: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). 清单中要更新的重要条目是: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. 
-- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. 
+- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Event Handlers -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Defining an Event Handler -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## 调用处理程序 -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. 
To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. 调用处理程序只会在以下两种情况之一触发:当指定的函数被合约本身以外的账户调用时,或者当它在 Solidity 中被标记为外部,并作为同一合约中另一个函数的一部分被调用时。 -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### 定义调用处理程序 @@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han ### 映射函数 -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,9 +205,9 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## 区块处理程序 -除了订阅合约事件或函数调用之外,子图可能还希望在将新区块附加到链上时更新其数据。 为了实现这一点,子图可以在每个区块之后,或匹配预定义过滤器的区块之后,运行一个函数。 +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. -### 支持的过滤器 +### 支持的筛选器 #### 调用筛选器 @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. 块处理程序没有过滤器将确保每个块都调用处理程序。对于每种过滤器类型,一个数据源只能包含一个块处理程序。 @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. 
+The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### 一次性筛选器 @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -带有 "once filter" 的所定义处理程序将在所有其他处理程序运行之前仅被调用一次。这种配置允许子图将该处理程序用作初始化处理程序,在索引开始时执行特定任务。 +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### 映射函数 -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## 起始区块 -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. ```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Indexer Hints -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. 
Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. History as of a given block is required for: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block If historical data as of the block has been pruned, the above capabilities will not be available. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your Subgraph's settings: To retain a specific amount of historical data: @@ -532,3 +532,17 @@ To preserve the complete history of entity states: indexerHints: prune: never ``` + +## SpecVersion Releases + +| 版本 | Release 说明 | +| :---: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). 
| +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 42e58b6846c7ef88e56028cf5592392d9bbc33e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:44 -0500 Subject: [PATCH 0819/1789] New translations subgraph-manifest.mdx (Urdu (Pakistan)) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/ur/subgraphs/developing/creating/subgraph-manifest.mdx index de8a303b302d..734d298d7ed4 100644 --- a/website/src/pages/ur/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/ur/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## جائزہ -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -A single subgraph can: +A single Subgraph can: - Index data from multiple smart contracts (but not multiple networks). @@ -24,9 +24,9 @@ A single subgraph can: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). مینی فیسٹ کے لیے اپ ڈیٹ کرنے کے لیے اہم اندراجات یہ ہیں: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. 
+- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. 
-- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. +- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Event Handlers -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Defining an Event Handler -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## کال ہینڈلرز -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. 
This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. کال ہینڈلرز صرف دو صورتوں میں سے ایک میں ٹرگر کریں گے: جب مخصوص کردہ فنکشن کو کنٹریکٹ کے علاوہ کسی دوسرے اکاؤنٹ سے کال جاتا ہے یا جب اسے سولیڈیٹی میں بیرونی کے طور پر نشان زد کیا جاتا ہے اور اسی کنٹریکٹ میں کسی دوسرے فنکشن کے حصے کے طور پر کال کیا جاتا ہے. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### کال ہینڈلر کی تعریف @@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han ### میپنگ فنکشن -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## بلاک ہینڈلرز -کنٹریکٹ ایونٹس یا فنکشن کالز کو سبسکرائب کرنے کے علاوہ، ایک سب گراف اپنے ڈیٹا کو اپ ڈیٹ کرنا چاہتا ہے جیسے جیسے چین میں نئے بلاکس شامل ہوتے ہیں. اس کو حاصل کرنے کے لیے ایک سب گراف ہر بلاک کے بعد یا پہلے سے طے شدہ فلٹر سے مماثل بلاکس کے بعد ایک فنکشن چلا سکتا ہے. +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### معاون فلٹرز @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. 
If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. بلاک ہینڈلر کے لیے فلٹر کی عدم موجودگی اس بات کو یقینی بنائے گی کہ ہینڈلر کو ہر بلاک کے لیے کال کیا جاتا ہے. ڈیٹا سورس میں ہر فلٹر کی قسم کے لیے صرف ایک بلاک ہینڈلر ہو سکتا ہے. @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### ونس فلٹر @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -ایک بار فلٹر کے ساتھ متعین ہینڈلر کو دوسرے تمام ہینڈلرز کے چلنے سے پہلے صرف ایک بار کال کیا جائے گا۔ یہ کنفیگریشن سب گراف کو انڈیکسنگ کے آغاز میں مخصوص کاموں کو انجام دیتے ہوئے، ہینڈلر کو ابتدائیہ ہینڈلر کے طور پر استعمال کرنے کی اجازت دیتی ہے. +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### میپنگ فنکشن -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## بلاکس شروع کریں -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. 
```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Indexer Hints -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. History as of a given block is required for: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block If historical data as of the block has been pruned, the above capabilities will not be available. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your Subgraph's settings: To retain a specific amount of historical data: @@ -532,3 +532,17 @@ To preserve the complete history of entity states: indexerHints: prune: never ``` + +## SpecVersion Releases + +| ورزن | جاری کردہ نوٹس | +| :---: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 4da9b97ec3f489ad0d997b979f72327789a24217 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:45 -0500 Subject: [PATCH 0820/1789] New translations subgraph-manifest.mdx (Vietnamese) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/vi/subgraphs/developing/creating/subgraph-manifest.mdx index 01ca69dbcd4b..cb4a9b0dc6fa 100644 --- a/website/src/pages/vi/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/vi/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## Tổng quan -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -A single subgraph can: +A single Subgraph can: - Index data from multiple smart contracts (but not multiple networks). 
@@ -24,9 +24,9 @@ A single subgraph can: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). Các mục nhập quan trọng cần cập nhật cho tệp kê khai là: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. 
These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. +- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Event Handlers -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Defining an Event Handler -An event handler is declared within a data source in the subgraph's YAML configuration. 
It specifies which events to listen for and the corresponding function to execute when those events are detected. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## Trình xử lý lệnh gọi -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### Xác định một Trình xử lý lệnh gọi @@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han ### Chức năng Ánh xạ -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. 
In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## Trình xử lý Khối -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### Bộ lọc được hỗ trợ @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### Once Filter @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### Chức năng Ánh xạ -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. 
```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## Khối Bắt đầu -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. ```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Indexer Hints -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. 
History as of a given block is required for: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block If historical data as of the block has been pruned, the above capabilities will not be available. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your Subgraph's settings: To retain a specific amount of historical data: @@ -532,3 +532,17 @@ To preserve the complete history of entity states: indexerHints: prune: never ``` + +## SpecVersion Releases + +| Phiên bản | Ghi chú phát hành | +| :-------: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From 98210bb1a7d3a1ca56a9a50160d1f604c8dfa5a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:46 -0500 Subject: [PATCH 0821/1789] New translations subgraph-manifest.mdx (Marathi) --- .../developing/creating/subgraph-manifest.mdx | 88 +++++++++++-------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/mr/subgraphs/developing/creating/subgraph-manifest.mdx index a09668000af7..abe072375bb7 100644 --- a/website/src/pages/mr/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/mr/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,19 +4,19 @@ title: Subgraph Manifest ## सविश्लेषण -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -The **subgraph definition** consists of the following files: +The **Subgraph definition** consists of the following files: -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml`: Contains the Subgraph manifest -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) ### Subgraph Capabilities -A single subgraph can: +A single Subgraph can: - Index data from multiple smart contracts (but not multiple networks). @@ -24,9 +24,9 @@ A single subgraph can: - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). मॅनिफेस्टसाठी अद्यतनित करण्याच्या महत्त्वाच्या नोंदी आहेत: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. 
-- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. 
+- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## Event Handlers -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. ### Defining an Event Handler -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## हँडलर्सना कॉल करा -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. 
To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. कॉल हँडलर्स फक्त दोनपैकी एका प्रकरणात ट्रिगर होतील: जेव्हा निर्दिष्ट केलेल्या फंक्शनला कॉन्ट्रॅक्ट व्यतिरिक्त इतर खात्याद्वारे कॉल केले जाते किंवा जेव्हा ते सॉलिडिटीमध्ये बाह्य म्हणून चिन्हांकित केले जाते आणि त्याच कॉन्ट्रॅक्टमधील दुसर्‍या फंक्शनचा भाग म्हणून कॉल केले जाते. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### कॉल हँडलरची व्याख्या @@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han ### मॅपिंग कार्य -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## ब्लॉक हँडलर -कॉन्ट्रॅक्ट इव्हेंट्स किंवा फंक्शन कॉल्सची सदस्यता घेण्याव्यतिरिक्त, सबग्राफला त्याचा डेटा अद्यतनित करायचा असेल कारण साखळीमध्ये नवीन ब्लॉक्स जोडले जातात. हे साध्य करण्यासाठी सबग्राफ प्रत्येक ब्लॉकनंतर किंवा पूर्व-परिभाषित फिल्टरशी जुळणार्‍या ब्लॉक्सनंतर फंक्शन चालवू शकतो. +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### समर्थित फिल्टर @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. 
If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. ब्लॉक हँडलरसाठी फिल्टरची अनुपस्थिती हे सुनिश्चित करेल की हँडलरला प्रत्येक ब्लॉक म्हटले जाईल. डेटा स्त्रोतामध्ये प्रत्येक फिल्टर प्रकारासाठी फक्त एक ब्लॉक हँडलर असू शकतो. @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### Once Filter @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### मॅपिंग कार्य -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## ब्लॉक सुरू करा -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. 
```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Indexer Hints -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. History as of a given block is required for: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block If historical data as of the block has been pruned, the above capabilities will not be available. > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your Subgraph's settings: To retain a specific amount of historical data: @@ -532,3 +532,17 @@ To preserve the complete history of entity states: indexerHints: prune: never ``` + +## SpecVersion Releases + +| आवृत्ती | रिलीझ नोट्स | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 198901cc172254e95764406f907f091631757d80 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:47 -0500 Subject: [PATCH 0822/1789] New translations subgraph-manifest.mdx (Hindi) --- .../developing/creating/subgraph-manifest.mdx | 90 +++++++++++-------- 1 file changed, 52 insertions(+), 38 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/hi/subgraphs/developing/creating/subgraph-manifest.mdx index 31dbc7079552..a6be362620dd 100644 --- a/website/src/pages/hi/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/hi/subgraphs/developing/creating/subgraph-manifest.mdx @@ -4,29 +4,29 @@ title: Subgraph Manifest ## अवलोकन -subgraph मैनिफेस्ट, subgraph.yaml, उन स्मार्ट कॉन्ट्रैक्ट्स और नेटवर्क को परिभाषित करता है जिन्हें आपका subgraph इंडेक्स करेगा, इन कॉन्ट्रैक्ट्स से ध्यान देने योग्य इवेंट्स, और इवेंट डेटा को उन संस्थाओं के साथ मैप करने का तरीका जिन्हें Graph Node स्टोर करता है और जिन्हें क्वेरी करने की अनुमति देता है। +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. -**subgraph definition** में निम्नलिखित फ़ाइलें शामिल हैं: +The **Subgraph definition** consists of the following files: -- subgraph.yaml: में subgraph मैनिफेस्ट शामिल है +- `subgraph.yaml`: Contains the Subgraph manifest -- schema.graphql: एक GraphQL स्कीमा जो आपके लिए डेटा को परिभाषित करता है और इसे GraphQL के माध्यम से क्वेरी करने का तरीका बताता है. +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL - `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. 
`mapping.ts` in this guide) ### Subgraph क्षमताएँ -एक सिंगल subgraph कर सकता है: +A single Subgraph can: - कई स्मार्ट कॉन्ट्रैक्ट्स से डेटा को इंडेक्स करें (लेकिन कई नेटवर्क नहीं)। -- IPFS फ़ाइलों से डेटा को डेटा स्रोत फ़ाइलें का उपयोग करके अनुक्रमित करें। +- IPFS फ़ाइलों से डेटा को डेटा स्रोत फ़ाइलें का उपयोग करके अनुक्रमित करें। - Add an entry for each contract that requires indexing to the `dataSources` array. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +For the example Subgraph listed above, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -79,47 +79,47 @@ dataSources: ## Subgraph Entries -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). मेनिफेस्ट के लिए अद्यतन करने के लिए महत्वपूर्ण प्रविष्टियां हैं: -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. 
-- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. +- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. ## आयोजन Handlers -Event handlers एक subgraph में स्मार्ट कॉन्ट्रैक्ट्स द्वारा ब्लॉकचेन पर उत्पन्न होने वाले विशिष्ट घटनाओं पर प्रतिक्रिया करते हैं और subgraph के मैनिफेस्ट में परिभाषित हैंडलर्स को ट्रिगर करते हैं। इससे subgraphs को परिभाषित लॉजिक के अनुसार घटना डेटा को प्रोसेस और स्टोर करने की अनुमति मिलती है। +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. 
This enables Subgraphs to process and store event data according to defined logic. ### इवेंट हैंडलर को परिभाषित करना -एक event handler को डेटा स्रोत के भीतर subgraph के YAML configuration में घोषित किया जाता है। यह निर्दिष्ट करता है कि कौन से events पर ध्यान देना है और उन events का पता चलने पर कार्यान्वित करने के लिए संबंधित function क्या है। +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. ```yaml dataSources: @@ -149,11 +149,11 @@ dataSources: ## कॉल हैंडलर्स -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. कॉल हैंडलर केवल दो मामलों में से एक में ट्रिगर होंगे: जब निर्दिष्ट फ़ंक्शन को अनुबंध के अलावा किसी अन्य खाते द्वारा कॉल किया जाता है या जब इसे सॉलिडिटी में बाहरी के रूप में चिह्नित किया जाता है और उसी अनुबंध में किसी अन्य फ़ंक्शन के भाग के रूप में कॉल किया जाता है। -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. ### कॉल हैंडलर को परिभाषित करना @@ -186,7 +186,7 @@ The `function` is the normalized function signature to filter calls by. The `han ### मानचित्रण समारोह -Each call handler takes a single parameter that has a type corresponding to the name of the called function. 
In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example Subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -205,7 +205,7 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ## ब्लॉक हैंडलर -Contract events या function calls की सदस्यता लेने के अलावा, एक subgraph अपने data को update करना चाह सकता है क्योंकि chain में नए blocks जोड़े जाते हैं। इसे प्राप्त करने के लिए एक subgraph every block के बाद या pre-defined filter से match होन वाले block के बाद एक function चला सकता है। +In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this a Subgraph can run a function after every block or after blocks that match a pre-defined filter. ### समर्थित फ़िल्टर @@ -218,7 +218,7 @@ filter: _The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a Subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. ब्लॉक हैंडलर के लिए फ़िल्टर की अनुपस्थिति सुनिश्चित करेगी कि हैंडलर को प्रत्येक ब्लॉक कहा जाता है। डेटा स्रोत में प्रत्येक फ़िल्टर प्रकार के लिए केवल एक ब्लॉक हैंडलर हो सकता है। @@ -261,7 +261,7 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals. #### Once फ़िल्टर @@ -276,7 +276,7 @@ blockHandlers: kind: once ``` -'once' फ़िल्टर के साथ परिभाषित हैंडलर केवल एक बार सभी अन्य हैंडलर्स चलने से पहले कॉल किया जाएगा। यह कॉन्फ़िगरेशन 'subgraph' को प्रारंभिक हैंडलर के रूप में उपयोग करने की अनुमति देता है, जिससे 'indexing' के शुरू होने पर विशिष्ट कार्य किए जा सकते हैं। +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. ```ts export function handleOnce(block: ethereum.Block): void { @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### मानचित्रण समारोह -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. 
+The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts and create or update entities. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -317,7 +317,7 @@ An event will only be triggered when both the signature and topic 0 match. By de Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. ```yaml eventHandlers: @@ -454,7 +454,7 @@ There are setters and getters like `setString` and `getString` for all value typ ## Start Blocks -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. ```yaml dataSources: @@ -488,13 +488,13 @@ dataSources: ## Indexer संकेत -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. > This feature is available from `specVersion: 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include: 1. `"never"`: No pruning of historical data; retains the entire history. 2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. @@ -505,19 +505,19 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde prune: auto ``` -> इस संदर्भ में "history" का अर्थ उन आंकड़ों को संग्रहीत करने से है जो 'mutable' संस्थाओं की पुरानी स्थितियों को दर्शाते हैं। +> The term "history" in this context of Subgraphs is about storing data that reflects the old states of mutable entities. 
दिए गए ब्लॉक के रूप में इतिहास की आवश्यकता है: -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- उस ब्लॉक पर 'subgraph' को वापस लाना +- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history +- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block +- Rewinding the Subgraph back to that block यदि ब्लॉक के रूप में ऐतिहासिक डेटा को प्रून किया गया है, तो उपरोक्त क्षमताएँ उपलब्ध नहीं होंगी। > Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your Subgraph's settings: विशिष्ट मात्रा में ऐतिहासिक डेटा बनाए रखने के लिए: @@ -532,3 +532,17 @@ For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/# indexerHints: prune: never ``` + +## SpecVersion Releases + +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From abdd55ffba267fc0a58e176edce8629688071410 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:48 -0500 Subject: [PATCH 0823/1789] New translations subgraph-manifest.mdx (Swahili) --- .../developing/creating/subgraph-manifest.mdx | 548 ++++++++++++++++++ 1 file changed, 548 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/developing/creating/subgraph-manifest.mdx diff --git a/website/src/pages/sw/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/sw/subgraphs/developing/creating/subgraph-manifest.mdx new file mode 100644 index 000000000000..428ff5332baf --- /dev/null +++ b/website/src/pages/sw/subgraphs/developing/creating/subgraph-manifest.mdx @@ -0,0 +1,548 @@ +--- +title: Subgraph Manifest +--- + +## Overview + +The Subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your Subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. + +The **Subgraph definition** consists of the following files: + +- `subgraph.yaml`: Contains the Subgraph manifest + +- `schema.graphql`: A GraphQL schema defining the data stored for your Subgraph and how to query it via GraphQL + +- `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) + +### Subgraph Capabilities + +A single Subgraph can: + +- Index data from multiple smart contracts (but not multiple networks). + +- Index data from IPFS files using File Data Sources. + +- Add an entry for each contract that requires indexing to the `dataSources` array. + +The full specification for Subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). + +For the example Subgraph listed above, `subgraph.yaml` is: + +```yaml +specVersion: 0.0.4 +description: Gravatar for Ethereum +repository: https://github.com/graphprotocol/graph-tooling +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Gravity + network: mainnet + source: + address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' + abi: Gravity + startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - Gravatar + abis: + - name: Gravity + file: ./abis/Gravity.json + eventHandlers: + - event: NewGravatar(uint256,address,string,string) + handler: handleNewGravatar + - event: UpdatedGravatar(uint256,address,string,string) + handler: handleUpdatedGravatar + callHandlers: + - function: createGravatar(string,string) + handler: handleCreateGravatar + blockHandlers: + - handler: handleBlock + - handler: handleBlockWithCall + filter: + kind: call + file: ./src/mapping.ts +``` + +## Subgraph Entries + +> Important Note: Be sure you populate your Subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). + +The important entries to update for the manifest are: + +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the Subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. 
+ +- `description`: a human-readable description of what the Subgraph is. This description is displayed in Graph Explorer when the Subgraph is deployed to Subgraph Studio. + +- `repository`: the URL of the repository where the Subgraph manifest can be found. This is also displayed in Graph Explorer. + +- `features`: a list of all used [feature](#experimental-features) names. + +- `indexerHints.prune`: Defines the retention of historical block data for a Subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. + +- `dataSources.source`: the address of the smart contract the Subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. + +- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. + +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within Subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for Subgraph development. + +- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. + +- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. + +- `dataSources.mapping.eventHandlers`: lists the smart contract events this Subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. + +- `dataSources.mapping.callHandlers`: lists the smart contract functions this Subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. + +- `dataSources.mapping.blockHandlers`: lists the blocks this Subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. + +A single Subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. + +## Event Handlers + +Event handlers in a Subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the Subgraph's manifest. This enables Subgraphs to process and store event data according to defined logic. + +### Defining an Event Handler + +An event handler is declared within a data source in the Subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. 
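The YAML declaration below maps each event signature to a handler name; the handler itself is an ordinary mapping function that receives the generated event class. As a hedged sketch only (the `Transfer` import path follows `graph codegen` conventions, and `TransferRecord` is a hypothetical entity, not part of this example's schema), such a handler might look like:

```typescript
// Generated event class; the import path assumes this data source is named Gravity
import { Transfer } from '../generated/Gravity/Gravity'
// Hypothetical entity; it would need to be declared in schema.graphql
import { TransferRecord } from '../generated/schema'

export function handleTransfer(event: Transfer): void {
  // Decoded event parameters are available, fully typed, under event.params
  let record = new TransferRecord(event.transaction.hash)
  record.blockNumber = event.block.number
  record.save()
}
```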
+
+```yaml
+dataSources:
+  - kind: ethereum/contract
+    name: Gravity
+    network: dev
+    source:
+      address: '0x731a10897d267e19b34503ad902d0a29173ba4b1'
+      abi: Gravity
+    mapping:
+      kind: ethereum/events
+      apiVersion: 0.0.6
+      language: wasm/assemblyscript
+      entities:
+        - Gravatar
+        - Transaction
+      abis:
+        - name: Gravity
+          file: ./abis/Gravity.json
+      eventHandlers:
+        - event: Approval(address,address,uint256)
+          handler: handleApproval
+        - event: Transfer(address,address,uint256)
+          handler: handleTransfer
+          topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic.
+```
+
+## Call Handlers
+
+While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a Subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured.
+
+Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract.
+
+> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB Chain and Arbitrum, do not support this API. If a Subgraph indexing one of these networks contains one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers and are supported on every EVM network.
+
+### Defining a Call Handler
+
+To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to.
+
+```yaml
+dataSources:
+  - kind: ethereum/contract
+    name: Gravity
+    network: mainnet
+    source:
+      address: '0x731a10897d267e19b34503ad902d0a29173ba4b1'
+      abi: Gravity
+    mapping:
+      kind: ethereum/events
+      apiVersion: 0.0.6
+      language: wasm/assemblyscript
+      entities:
+        - Gravatar
+        - Transaction
+      abis:
+        - name: Gravity
+          file: ./abis/Gravity.json
+      callHandlers:
+        - function: createGravatar(string,string)
+          handler: handleCreateGravatar
+```
+
+The `function` is the normalized function signature to filter calls by. The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract.
+
+### Mapping Function
+
+Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example Subgraph above, the mapping contains a handler that runs when the `createGravatar` function is called and receives a `CreateGravatarCall` as an argument:
+
+```typescript
+import { CreateGravatarCall } from '../generated/Gravity/Gravity'
+import { Transaction } from '../generated/schema'
+
+export function handleCreateGravatar(call: CreateGravatarCall): void {
+  let id = call.transaction.hash
+  let transaction = new Transaction(id)
+  transaction.displayName = call.inputs._displayName
+  transaction.imageUrl = call.inputs._imageUrl
+  transaction.save()
+}
+```
+
+The `handleCreateGravatar` function takes a `CreateGravatarCall`, a subclass of `ethereum.Call` provided by `@graphprotocol/graph-ts` that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`.
+
+## Block Handlers
+
+In addition to subscribing to contract events or function calls, a Subgraph may want to update its data as new blocks are appended to the chain. To achieve this, a Subgraph can run a function after every block or after blocks that match a pre-defined filter.
+
+### Supported Filters
+
+#### Call Filter
+
+```yaml
+filter:
+  kind: call
+```
+
+_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._
+
+> **Note:** The `call` filter currently depends on the Parity tracing API. Certain networks, such as BNB Chain and Arbitrum, do not support this API. If a Subgraph indexing one of these networks contains one or more block handlers with a `call` filter, it will not start syncing.
+
+The absence of a filter for a block handler will ensure that the handler is called for every block. A data source can only contain one block handler for each filter type.
+
+```yaml
+dataSources:
+  - kind: ethereum/contract
+    name: Gravity
+    network: dev
+    source:
+      address: '0x731a10897d267e19b34503ad902d0a29173ba4b1'
+      abi: Gravity
+    mapping:
+      kind: ethereum/events
+      apiVersion: 0.0.6
+      language: wasm/assemblyscript
+      entities:
+        - Gravatar
+        - Transaction
+      abis:
+        - name: Gravity
+          file: ./abis/Gravity.json
+      blockHandlers:
+        - handler: handleBlock
+        - handler: handleBlockWithCallToContract
+          filter:
+            kind: call
+```
+
+#### Polling Filter
+
+> **Requires `specVersion` >= 0.0.8**
+>
+> **Note:** Polling filters are only available on dataSources of `kind: ethereum`.
+
+```yaml
+blockHandlers:
+  - handler: handleBlock
+    filter:
+      kind: polling
+      every: 10
+```
+
+The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the Subgraph to perform specific operations at regular block intervals.
+
+#### Once Filter
+
+> **Requires `specVersion` >= 0.0.8**
+>
+> **Note:** Once filters are only available on dataSources of `kind: ethereum`.
+
+```yaml
+blockHandlers:
+  - handler: handleOnce
+    filter:
+      kind: once
+```
+
+The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the Subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing.
+
+```ts
+import { ethereum, Bytes } from '@graphprotocol/graph-ts'
+import { InitialData } from '../generated/schema'
+
+export function handleOnce(block: ethereum.Block): void {
+  let data = new InitialData(Bytes.fromUTF8('initial'))
+  data.data = 'Setup data here'
+  data.save()
+}
+```
+
+### Mapping Function
+
+The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing Subgraph entities in the store, call smart contracts, and create or update entities.
+
+```typescript
+import { ethereum } from '@graphprotocol/graph-ts'
+import { Block } from '../generated/schema'
+
+export function handleBlock(block: ethereum.Block): void {
+  let id = block.hash
+  let entity = new Block(id)
+  entity.save()
+}
+```
+
+## Anonymous Events
+
+If you need to process anonymous events in Solidity, you can do so by providing the topic 0 of the event, as in the example:
+
+```yaml
+eventHandlers:
+  - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes)
+    topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31'
+    handler: handleGive
+```
+
+An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature.
+
+## Transaction Receipts in Event Handlers
+
+Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can access the receipt for the transaction which emitted them.
+
+To do so, event handlers must be declared in the Subgraph manifest with the new `receipt: true` key, which is optional and defaults to `false`.
+
+```yaml
+eventHandlers:
+  - event: NewGravatar(uint256,address,string,string)
+    handler: handleNewGravatar
+    receipt: true
+```
+
+Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead.
+
+## Order of Triggering Handlers
+
+The triggers for a data source within a block are ordered using the following process:
+
+1. Event and call triggers are first ordered by transaction index within the block.
+2. Event and call triggers within the same transaction are ordered using a convention: event triggers first, then call triggers, each type respecting the order in which they are defined in the manifest.
+3. Block triggers are run after event and call triggers, in the order they are defined in the manifest.
+
+These ordering rules are subject to change.
+
+> **Note:** When new [dynamic data sources](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered.
+
+## Data Source Templates
+
+A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events.
+
+The addresses of these sub-contracts may or may not be known upfront, and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_.
+
+### Data Source for the Main Contract
+
+First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract.
+
+```yaml
+dataSources:
+  - kind: ethereum/contract
+    name: Factory
+    network: mainnet
+    source:
+      address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95'
+      abi: Factory
+    mapping:
+      kind: ethereum/events
+      apiVersion: 0.0.6
+      language: wasm/assemblyscript
+      file: ./src/mappings/factory.ts
+      entities:
+        - Directory
+      abis:
+        - name: Factory
+          file: ./abis/factory.json
+      eventHandlers:
+        - event: NewExchange(address,address)
+          handler: handleNewExchange
+```
+
+### Data Source Templates for Dynamically Created Contracts
+
+Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract.
+
+```yaml
+dataSources:
+  - kind: ethereum/contract
+    name: Factory
+    # ... other source fields for the main contract ...
+templates:
+  - name: Exchange
+    kind: ethereum/contract
+    network: mainnet
+    source:
+      abi: Exchange
+    mapping:
+      kind: ethereum/events
+      apiVersion: 0.0.6
+      language: wasm/assemblyscript
+      file: ./src/mappings/exchange.ts
+      entities:
+        - Exchange
+      abis:
+        - name: Exchange
+          file: ./abis/exchange.json
+      eventHandlers:
+        - event: TokenPurchase(address,uint256,uint256)
+          handler: handleTokenPurchase
+        - event: EthPurchase(address,uint256,uint256)
+          handler: handleEthPurchase
+        - event: AddLiquidity(address,uint256,uint256)
+          handler: handleAddLiquidity
+        - event: RemoveLiquidity(address,uint256,uint256)
+          handler: handleRemoveLiquidity
+```
+
+### Instantiating a Data Source Template
+
+In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract.
+
+```typescript
+import { Exchange } from '../generated/templates'
+
+export function handleNewExchange(event: NewExchange): void {
+  // Start indexing the exchange; `event.params.exchange` is the
+  // address of the new exchange contract
+  Exchange.create(event.params.exchange)
+}
+```
+
+> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks.
+>
+> If prior blocks contain data relevant to the new data source, it is best to index that data by reading the current state of the contract and creating entities representing that state at the time the new data source is created.
+
+### Data Source Context
+
+Data source contexts allow passing extra configuration when instantiating a template. In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so:
+
+```typescript
+import { DataSourceContext } from '@graphprotocol/graph-ts'
+import { Exchange } from '../generated/templates'
+
+export function handleNewExchange(event: NewExchange): void {
+  let context = new DataSourceContext()
+  context.setString('tradingPair', event.params.tradingPair)
+  Exchange.createWithContext(event.params.exchange, context)
+}
+```
+
+Inside a mapping of the `Exchange` template, the context can then be accessed:
+
+```typescript
+import { dataSource } from '@graphprotocol/graph-ts'
+
+let context = dataSource.context()
+let tradingPair = context.getString('tradingPair')
+```
+
+There are setters and getters like `setString` and `getString` for all value types.
+
+## Start Blocks
+
+The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a Subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created.
+
+```yaml
+dataSources:
+  - kind: ethereum/contract
+    name: ExampleSource
+    network: mainnet
+    source:
+      address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95'
+      abi: ExampleContract
+      startBlock: 6627917
+    mapping:
+      kind: ethereum/events
+      apiVersion: 0.0.6
+      language: wasm/assemblyscript
+      file: ./src/mappings/factory.ts
+      entities:
+        - User
+      abis:
+        - name: ExampleContract
+          file: ./abis/ExampleContract.json
+      eventHandlers:
+        - event: NewEvent(address,address)
+          handler: handleNewEvent
+```
+
+> **Note:** The contract creation block can be quickly looked up on Etherscan:
+>
+> 1. Search for the contract by entering its address in the search bar.
+> 2. Click on the creation transaction hash in the `Contract Creator` section.
+> 3. Load the transaction details page where you'll find the start block for that contract.
+
+## Indexer Hints
+
+The `indexerHints` setting in a Subgraph's manifest provides directives for indexers on processing and managing a Subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning.
+
+> This feature is available from `specVersion: 1.0.0`
+
+### Prune
+
+`indexerHints.prune`: Defines the retention of historical block data for a Subgraph. Options include:
+
+1. `"never"`: No pruning of historical data; retains the entire history.
+2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance.
+3. A specific number: Sets a custom limit on the number of historical blocks to retain.
+
+```
+  indexerHints:
+    prune: auto
+```
+
+> The term "history" in the context of Subgraphs refers to storing data that reflects the old states of mutable entities.
+
+History as of a given block is required for:
+
+- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the Subgraph's history
+- Using the Subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another Subgraph, at that block
+- Rewinding the Subgraph back to that block
+
+If historical data as of the block has been pruned, the above capabilities will not be available.
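+
+As a rough illustration, a time travel query of the following shape (sketched here against the `Gravatar` entity used in this page's examples, with an arbitrary block number) reads entity state as of a specific historical block and can only be answered if history for that block has been retained:
+
+```graphql
+{
+  gravatars(block: { number: 6500000 }) {
+    id
+    displayName
+  }
+}
+```
+
+If the block given in the `block: { number: ... }` argument falls into a range that has been pruned, queries like this one will no longer be answerable, while queries against the latest state continue to work.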
+ +> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. + +For Subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your Subgraph's settings: + +To retain a specific amount of historical data: + +``` + indexerHints: + prune: 1000 # Replace 1000 with the desired number of blocks to retain +``` + +To preserve the complete history of entity states: + +``` +indexerHints: + prune: never +``` + +## SpecVersion Releases + +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune Subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 4c30788310608f58356d9a3029ab4e97e06c4156 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:49 -0500 Subject: [PATCH 0824/1789] New translations multiple-networks.mdx (Romanian) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/ro/subgraphs/developing/deploying/multiple-networks.mdx index 4f7dcd3864e8..3b2b1bbc70ae 100644 --- a/website/src/pages/ro/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/ro/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Deploying a Subgraph to Multiple Networks +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). 
-## Deploying the subgraph to multiple networks +## Deploying the Subgraph to multiple networks -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### Using `graph-cli` @@ -20,7 +21,7 @@ Options: --network-file Networks config file path (default: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. @@ -54,7 +55,7 @@ If you don't have a `networks.json` file, you'll need to manually create one wit > Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file path/to/config One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. 
You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ A working example of this can be found [here](https://github.com/graphprotocol/e **Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. -## Subgraph Studio subgraph archive policy +## Subgraph Studio Subgraph archive policy -A subgraph version in Studio is archived if and only if it meets the following criteria: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - The version is not published to the network (or pending publish) - The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days +- The Subgraph hasn't been queried in 30 days -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -Every subgraph affected with this policy has an option to bring the version in question back. +Every Subgraph affected with this policy has an option to bring the version in question back. -## Checking subgraph health +## Checking Subgraph health -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. 
The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Node exposes a GraphQL endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. From 6631bac1b76c85fea303db007ea302189187f726 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:50 -0500 Subject: [PATCH 0825/1789] New translations multiple-networks.mdx (French) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/fr/subgraphs/developing/deploying/multiple-networks.mdx index a72771045069..3513471fb022 100644 --- a/website/src/pages/fr/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/fr/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Déploiement d'un subgraph sur plusieurs réseaux +sidebarTitle: Deploying to Multiple Networks --- -Cette page explique comment déployer un subgraph sur plusieurs réseaux. Pour déployer un subgraph, vous devez d'abord installer [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). Si vous n'avez pas encore créé de subgraph, consultez [Créer un subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## Déploiement du subgraph sur plusieurs réseaux +## Deploying the Subgraph to multiple networks -Dans certains cas, vous souhaiterez déployer le même subgraph sur plusieurs réseaux sans dupliquer tout son code. Le principal défi qui en découle est que les adresses contractuelles sur ces réseaux sont différentes. 
+In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### En utilisant `graph-cli` @@ -19,7 +20,7 @@ Options: --network-file Chemin du fichier de configuration des réseaux (par défaut : "./networks.json") ``` -Vous pouvez utiliser l'option `--network` pour spécifier une configuration de réseau à partir d'un fichier standard `json` (par défaut networks.json) pour facilement mettre à jour votre subgraph pendant le développement. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Note : La commande `init` générera désormais automatiquement un fichier networks.json en se basant sur les informations fournies. Vous pourrez ensuite mettre à jour les réseaux existants ou en ajouter de nouveaux. @@ -53,7 +54,7 @@ Si vous n'avez pas de fichier `networks.json`, vous devrez en créer un manuelle > Note : Vous n'avez besoin de spécifier aucun des `templates` (si vous en avez) dans le fichier de configuration, uniquement les `dataSources`. Si des `templates` sont déclarés dans le fichier `subgraph.yaml`, leur réseau sera automatiquement mis à jour vers celui spécifié avec l'option `--network`. -Supposons maintenant que vous souhaitiez déployer votre subgraph sur les réseaux `mainnet` et `sepolia`, et que ceci est votre fichier subgraph.yaml : +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -95,7 +96,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file chemin/à/configurer ``` -La commande `build` mettra à jour votre fichier `subgraph.yaml` avec la configuration `sepolia` puis recompilera le subgraph. Votre fichier `subgraph.yaml` devrait maintenant ressembler à ceci: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -126,7 +127,7 @@ yarn deploy --network sepolia --network-file chemin/à/configurer Une façon de paramétrer des aspects tels que les adresses de contrat en utilisant des versions plus anciennes de `graph-cli` est de générer des parties de celui-ci avec un système de creation de modèle comme [Mustache](https://mustache.github.io/) ou [Handlebars](https://handlebarsjs.com/). -Pour illustrer cette approche, supposons qu'un subgraph doive être déployé sur le réseau principal (mainnet) et sur Sepolia en utilisant des adresses de contrat différentes. Vous pourriez alors définir deux fichiers de configuration fournissant les adresses pour chaque réseau : +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. 
You could then define two config files providing the addresses for each network: ```json { @@ -178,7 +179,7 @@ Pour générer un manifeste pour l'un ou l'autre réseau, vous pourriez ajouter } ``` -Pour déployer ce subgraph pour mainnet ou Sepolia, vous devez simplement exécuter l'une des deux commandes suivantes : +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -192,25 +193,25 @@ Un exemple fonctionnel de ceci peut être trouvé [ici](https://github.com/graph Note : Cette approche peut également être appliquée à des situations plus complexes, dans lesquelles il est nécessaire de remplacer plus que les adresses des contrats et les noms de réseau ou où il est nécessaire de générer des mappages ou alors des ABI à partir de modèles également. -Cela vous donnera le `chainHeadBlock` que vous pouvez comparer avec le `latestBlock` sur votre subgraph pour vérifier s'il est en retard. `synced` vous informe si le subgraph a déjà rattrapé la chaîne. `health` peut actuellement prendre les valeurs de `healthy` si aucune erreur ne s'est produite, ou `failed` s'il y a eu une erreur qui a stoppé la progression du subgraph. Dans ce cas, vous pouvez vérifier le champ `fatalError` pour les détails sur cette erreur. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. -## Politique d'archivage des subgraphs de Subgraph Studio +## Subgraph Studio Subgraph archive policy -Une version de subgraph dans Studio est archivée si et seulement si elle répond aux critères suivants : +A Subgraph version in Studio is archived if and only if it meets the following criteria: - La version n'est pas publiée sur le réseau (ou en attente de publication) - La version a été créée il y a 45 jours ou plus -- Le subgraph n'a pas été interrogé depuis 30 jours +- The Subgraph hasn't been queried in 30 days -De plus, lorsqu'une nouvelle version est déployée, si le subgraph n'a pas été publié, la version N-2 du subgraph est archivée. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -Chaque subgraph concerné par cette politique dispose d'une option de restauration de la version en question. +Every Subgraph affected with this policy has an option to bring the version in question back. -## Vérification de l'état des subgraphs +## Checking Subgraph health -Si un subgraph se synchronise avec succès, c'est un bon signe qu'il continuera à bien fonctionner pour toujours. Cependant, de nouveaux déclencheurs sur le réseau peuvent amener votre subgraph à rencontrer une condition d'erreur non testée ou il peut commencer à prendre du retard en raison de problèmes de performances ou de problèmes avec les opérateurs de nœuds. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node expose un endpoint GraphQL que vous pouvez interroger pour vérifier l'état de votre subgraph. 
Sur le service hébergé, il est disponible à l'adresse `https://api.thegraph.com/index-node/graphql`. Sur un nœud local, il est disponible sur le port `8030/graphql` par défaut. Le schéma complet de cet endpoint peut être trouvé [ici](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Voici un exemple de requête qui vérifie l'état de la version actuelle d'un subgraph: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -237,4 +238,4 @@ Graph Node expose un endpoint GraphQL que vous pouvez interroger pour vérifier } ``` -Cela vous donnera le `chainHeadBlock` que vous pouvez comparer avec le `latestBlock` sur votre subgraph pour vérifier s'il est en retard. `synced` vous informe si le subgraph a déjà rattrapé la chaîne. `health` peut actuellement prendre les valeurs de `healthy` si aucune erreur ne s'est produite, ou `failed` s'il y a eu une erreur qui a stoppé la progression du subgraph. Dans ce cas, vous pouvez vérifier le champ `fatalError` pour les détails sur cette erreur. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. From 416f331aec0f759ec2c1c548271e632ff5392e95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:51 -0500 Subject: [PATCH 0826/1789] New translations multiple-networks.mdx (Spanish) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/es/subgraphs/developing/deploying/multiple-networks.mdx index c206beeb8fb3..a96efc430a61 100644 --- a/website/src/pages/es/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/es/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Deploying a Subgraph to Multiple Networks +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). 
-## Desplegando el subgráfo en múltiples redes +## Deploying the Subgraph to multiple networks -En algunos casos, querrás desplegar el mismo subgrafo en múltiples redes sin duplicar todo su código. El principal reto que conlleva esto es que las direcciones de los contratos en estas redes son diferentes. +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### Using `graph-cli` @@ -20,7 +21,7 @@ Options: --network-file Networks config file path (default: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. @@ -54,7 +55,7 @@ If you don't have a `networks.json` file, you'll need to manually create one wit > Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file path/to/config One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. 
You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ A working example of this can be found [here](https://github.com/graphprotocol/e **Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. -## Política de archivo de subgrafos en Subgraph Studio +## Subgraph Studio Subgraph archive policy -A subgraph version in Studio is archived if and only if it meets the following criteria: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - The version is not published to the network (or pending publish) - The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days +- The Subgraph hasn't been queried in 30 days -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -Cada subgrafo afectado por esta política tiene una opción para recuperar la versión en cuestión. +Every Subgraph affected with this policy has an option to bring the version in question back. -## Comprobando la salud del subgrafo +## Checking Subgraph health -Si un subgrafo se sincroniza con éxito, es una buena señal de que seguirá funcionando bien para siempre. Sin embargo, los nuevos activadores en la red pueden hacer que tu subgrafo alcance una condición de error no probada o puede comenzar a retrasarse debido a problemas de rendimiento o problemas con los operadores de nodos. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. 
The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Node exposes a GraphQL endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. From 03a2c974db4420b8ea01591d87ceff43c5881ffa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:52 -0500 Subject: [PATCH 0827/1789] New translations multiple-networks.mdx (Arabic) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/ar/subgraphs/developing/deploying/multiple-networks.mdx index 4f7dcd3864e8..3b2b1bbc70ae 100644 --- a/website/src/pages/ar/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/ar/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Deploying a Subgraph to Multiple Networks +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## Deploying the subgraph to multiple networks +## Deploying the Subgraph to multiple networks -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. 
+In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### Using `graph-cli` @@ -20,7 +21,7 @@ Options: --network-file Networks config file path (default: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. @@ -54,7 +55,7 @@ If you don't have a `networks.json` file, you'll need to manually create one wit > Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file path/to/config One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ A working example of this can be found [here](https://github.com/graphprotocol/e **Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. 
-This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. -## Subgraph Studio subgraph archive policy +## Subgraph Studio Subgraph archive policy -A subgraph version in Studio is archived if and only if it meets the following criteria: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - The version is not published to the network (or pending publish) - The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days +- The Subgraph hasn't been queried in 30 days -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -Every subgraph affected with this policy has an option to bring the version in question back. +Every Subgraph affected with this policy has an option to bring the version in question back. -## Checking subgraph health +## Checking Subgraph health -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). 
Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Node exposes a GraphQL endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. From 34f0c559bb201b4b5d6dd211289f852003c92149 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:53 -0500 Subject: [PATCH 0828/1789] New translations multiple-networks.mdx (Czech) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/cs/subgraphs/developing/deploying/multiple-networks.mdx index 77f05e1ad499..e9848601ebc7 100644 --- a/website/src/pages/cs/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/cs/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Deploying a Subgraph to Multiple Networks +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## Nasazení podgrafu do více sítí +## Deploying the Subgraph to multiple networks -V některých případech budete chtít nasadit stejný podgraf do více sítí, aniž byste museli duplikovat celý jeho kód. Hlavním problémem, který s tím souvisí, je skutečnost, že smluvní adresy v těchto sítích jsou různé. +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### Using `graph-cli` @@ -20,7 +21,7 @@ Options: --network-file Networks config file path (default: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. 
> Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. @@ -54,7 +55,7 @@ If you don't have a `networks.json` file, you'll need to manually create one wit > Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file path/to/config One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ A working example of this can be found [here](https://github.com/graphprotocol/e **Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. 
In this case, you can check the `fatalError` field for details on this error. -## Zásady archivace subgrafů Subgraph Studio +## Subgraph Studio Subgraph archive policy -A subgraph version in Studio is archived if and only if it meets the following criteria: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - The version is not published to the network (or pending publish) - The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days +- The Subgraph hasn't been queried in 30 days -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -Každý podgraf ovlivněný touto zásadou má možnost vrátit danou verzi zpět. +Every Subgraph affected with this policy has an option to bring the version in question back. -## Kontrola stavu podgrafů +## Checking Subgraph health -Pokud se podgraf úspěšně synchronizuje, je to dobré znamení, že bude dobře fungovat navždy. Nové spouštěče v síti však mohou způsobit, že se podgraf dostane do neověřeného chybového stavu, nebo může začít zaostávat kvůli problémům s výkonem či operátory uzlů. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Node exposes a GraphQL endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. 
In this case, you can check the `fatalError` field for details on this error. From 01af9310e4abaf90536b1ff119f774302949f0d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:54 -0500 Subject: [PATCH 0829/1789] New translations multiple-networks.mdx (German) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/de/subgraphs/developing/deploying/multiple-networks.mdx index 7bc4c42301c5..5078253cb966 100644 --- a/website/src/pages/de/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/de/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Bereitstellen eines Subgraphen in mehreren Netzen +sidebarTitle: Deploying to Multiple Networks --- -Auf dieser Seite wird erklärt, wie man einen Subgraphen in mehreren Netzwerken bereitstellt. Um einen Subgraphen bereitzustellen, müssen Sie zunächst die [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) installieren. Wenn Sie noch keinen Subgraphen erstellt haben, lesen Sie [Erstellen eines Subgraphen](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## Breitstellen des Subgraphen in mehreren Netzen +## Deploying the Subgraph to multiple networks -In manchen Fällen möchten Sie denselben Subgraph in mehreren Netzen bereitstellen, ohne den gesamten Code zu duplizieren. Die größte Herausforderung dabei ist, dass die Vertragsadressen in diesen Netzen unterschiedlich sind. +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### Verwendung von `graph-cli` @@ -20,7 +21,7 @@ Optionen: --network-file Netzwerkkonfigurationsdateipfad (Standard: „./networks.json“) ``` -Sie können die Option `--network` verwenden, um eine Netzwerkkonfiguration aus einer `json`-Standarddatei (standardmäßig `networks.json`) anzugeben, um Ihren Subgraphen während der Entwicklung einfach zu aktualisieren. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Hinweis: Der Befehl `init` generiert nun automatisch eine `networks.json` auf der Grundlage der angegebenen Informationen. Sie können dann bestehende Netzwerke aktualisieren oder zusätzliche Netzwerke hinzufügen. @@ -54,7 +55,7 @@ Wenn Sie keine \`networks.json'-Datei haben, müssen Sie manuell eine Datei mit > Hinweis: Sie müssen keine `templates` (falls Sie welche haben) in der Konfigurationsdatei angeben, nur die `dataSources`. Wenn in der Datei `subgraph.yaml` irgendwelche `templates` deklariert sind, wird ihr Netzwerk automatisch auf das mit der Option `--network` angegebene aktualisiert. 
-Nehmen wir an, Sie möchten Ihren Subgraphen in den Netzwerken `mainnet` und `sepolia` einsetzen, und dies ist Ihre `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -Der Befehl `build` aktualisiert die Datei `subgraph.yaml` mit der `sepolia`-Konfiguration und kompiliert den Subgraphen neu. Ihre `subgraph.yaml` Datei sollte nun wie folgt aussehen: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file path/to/config Eine Möglichkeit, Aspekte wie Vertragsadressen mit älteren `graph-cli` Versionen zu parametrisieren, besteht darin, Teile davon mit einem Templating-System wie [Mustache](https://mustache.github.io/) oder [Handlebars](https://handlebarsjs.com/) zu generieren. -Zur Veranschaulichung dieses Ansatzes nehmen wir an, dass ein Subgraph im Mainnet und in Sepolia mit unterschiedlichen Vertragsadressen bereitgestellt werden soll. Sie könnten dann zwei Konfigurationsdateien definieren, die die Adressen für jedes Netz bereitstellen: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ Um ein Manifest für eines der beiden Netzwerke zu erstellen, können Sie zwei z } ``` -Um diesen Subgraphen für Mainnet oder Sepolia einzusetzen, führen Sie nun einfach einen der beiden folgenden Befehle aus: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ Ein funktionierendes Datenbeispiel hierfür finden Sie [hier](https://github.com **Hinweis:** Dieser Ansatz kann auch auf komplexere Situationen angewandt werden, in denen es notwendig ist, mehr als nur Vertragsadressen und Netzwerknamen zu ersetzen, oder in denen auch Mappings oder ABIs aus Vorlagen erzeugt werden. -Dies gibt Ihnen den `chainHeadBlock`, den Sie mit dem `latestBlock` Ihres Subgraphen vergleichen können, um zu prüfen, ob er im Rückstand ist. `synced` gibt Auskunft darüber, ob der Subgraph jemals zur Kette aufgeschlossen hat. `health` kann derzeit die Werte `healthy` annehmen, wenn keine Fehler aufgetreten sind, oder `failed`, wenn es einen Fehler gab, der den Fortschritt des Subgraphen aufgehalten hat. In diesem Fall können Sie das Feld `fatalError` auf Details zu diesem Fehler überprüfen. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. 
-## Subgraph Studio Subgraphen-Archivierungsrichtlinie +## Subgraph Studio Subgraph archive policy -Eine Subgraph-Version in Studio wird nur dann archiviert, wenn sie die folgenden Kriterien erfüllt: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - Die Version ist nicht im Netz veröffentlicht (oder steht zur Veröffentlichung an) - Die Version wurde vor 45 oder mehr Tagen erstellt -- Der Subgraph ist seit 30 Tagen nicht mehr abgefragt worden +- The Subgraph hasn't been queried in 30 days -Wenn eine neue Version bereitgestellt wird und der Subgraph noch nicht veröffentlicht wurde, wird außerdem die Version N-2 des Subgraphen archiviert. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -Jeder Subgraph, der von dieser Richtlinie betroffen ist, hat die Möglichkeit, die betreffende Version zurückzubringen. +Every Subgraph affected with this policy has an option to bring the version in question back. -## Überprüfung des Zustands eines Subgraphen +## Checking Subgraph health -Wenn ein Subgraph erfolgreich synchronisiert wird, ist das ein gutes Zeichen dafür, dass er für immer gut laufen wird. Neue Auslöser im Netzwerk könnten jedoch dazu führen, dass Ihr Subgraph auf eine ungetestete Fehlerbedingung stößt, oder er könnte aufgrund von Leistungsproblemen oder Problemen mit den Knotenbetreibern ins Hintertreffen geraten. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node stellt einen GraphQL-Endpunkt zur Verfügung, den Sie abfragen können, um den Status Ihres Subgraphen zu überprüfen. Auf dem gehosteten Dienst ist er unter `https://api.thegraph.com/index-node/graphql` verfügbar. Auf einem lokalen Knoten ist er standardmäßig auf Port `8030/graphql` verfügbar. Das vollständige Schema für diesen Endpunkt finden Sie [hier](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Hier ist ein Datenbeispiel für eine Abfrage, die den Status der aktuellen Version eines Subgraphen überprüft: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Node stellt einen GraphQL-Endpunkt zur Verfügung, den Sie abfragen könne } ``` -Dies gibt Ihnen den `chainHeadBlock`, den Sie mit dem `latestBlock` Ihres Subgraphen vergleichen können, um zu prüfen, ob er im Rückstand ist. `synced` gibt Auskunft darüber, ob der Subgraph jemals zur Kette aufgeschlossen hat. `health` kann derzeit die Werte `healthy` annehmen, wenn keine Fehler aufgetreten sind, oder `failed`, wenn es einen Fehler gab, der den Fortschritt des Subgraphen aufgehalten hat. In diesem Fall können Sie das Feld `fatalError` auf Details zu diesem Fehler überprüfen. 
+This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. From 4d0e2c32cf6de0bb55ff39d7019ff3c19961dc51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:55 -0500 Subject: [PATCH 0830/1789] New translations multiple-networks.mdx (Italian) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/it/subgraphs/developing/deploying/multiple-networks.mdx index 0bcbe1eddc43..f8b9f74c6479 100644 --- a/website/src/pages/it/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/it/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Deploying a Subgraph to Multiple Networks +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## Distribuzione del subgraph su più reti +## Deploying the Subgraph to multiple networks -In alcuni casi, si desidera distribuire lo stesso subgraph su più reti senza duplicare tutto il suo codice. Il problema principale è che gli indirizzi dei contratti su queste reti sono diversi. +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### Using `graph-cli` @@ -20,7 +21,7 @@ Options: --network-file Networks config file path (default: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. @@ -54,7 +55,7 @@ If you don't have a `networks.json` file, you'll need to manually create one wit > Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. 
-Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file path/to/config One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ A working example of this can be found [here](https://github.com/graphprotocol/e **Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. 
-## Politica di archiviazione dei subgraph di Subgraph Studio +## Subgraph Studio Subgraph archive policy -A subgraph version in Studio is archived if and only if it meets the following criteria: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - The version is not published to the network (or pending publish) - The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days +- The Subgraph hasn't been queried in 30 days -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -Ogni subgraph colpito da questa politica ha un'opzione per recuperare la versione in questione. +Every Subgraph affected with this policy has an option to bring the version in question back. -## Verifica dello stato di salute del subgraph +## Checking Subgraph health -Se un subgraph si sincronizza con successo, è un buon segno che continuerà a funzionare bene per sempre. Tuttavia, nuovi trigger sulla rete potrebbero far sì che il subgraph si trovi in una condizione di errore non testata o che inizi a rimanere indietro a causa di problemi di prestazioni o di problemi con gli operatori dei nodi. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Node exposes a GraphQL endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. 
In this case, you can check the `fatalError` field for details on this error. From 6421fc5f52c4b555448576c1938946efe908a4c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:56 -0500 Subject: [PATCH 0831/1789] New translations multiple-networks.mdx (Japanese) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/ja/subgraphs/developing/deploying/multiple-networks.mdx index 53c7dcfbd86b..a43e7a32c7b8 100644 --- a/website/src/pages/ja/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/ja/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Deploying a Subgraph to Multiple Networks +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## サブグラフを複数のネットワークにデプロイする +## Deploying the Subgraph to multiple networks -場合によっては、すべてのコードを複製せずに、同じサブグラフを複数のネットワークに展開する必要があります。これに伴う主な課題は、これらのネットワークのコントラクト アドレスが異なることです。 +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### Using `graph-cli` @@ -20,7 +21,7 @@ Options: --network-file Networks config file path (default: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. @@ -54,7 +55,7 @@ If you don't have a `networks.json` file, you'll need to manually create one wit > Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. 
Your `subgraph.yaml` file now should look like this: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file path/to/config One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ A working example of this can be found [here](https://github.com/graphprotocol/e **Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. -## Subgraph Studio・サブグラフ・アーカイブポリシー +## Subgraph Studio Subgraph archive policy -A subgraph version in Studio is archived if and only if it meets the following criteria: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - The version is not published to the network (or pending publish) - The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days +- The Subgraph hasn't been queried in 30 days -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -このポリシーで影響を受けるすべてのサブグラフには、問題のバージョンを戻すオプションがあります。 +Every Subgraph affected with this policy has an option to bring the version in question back. 
-## サブグラフのヘルスチェック +## Checking Subgraph health -サブグラフが正常に同期された場合、それはそれが永久に正常に動作し続けることを示す良い兆候です。ただし、ネットワーク上の新しいトリガーにより、サブグラフがテストされていないエラー状態に陥ったり、パフォーマンスの問題やノード オペレーターの問題により遅れが生じたりする可能性があります。 +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Node exposes a GraphQL endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. From 33aebe4ed79981e989f57caf83e48bf5d010fefd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:57 -0500 Subject: [PATCH 0832/1789] New translations multiple-networks.mdx (Korean) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/ko/subgraphs/developing/deploying/multiple-networks.mdx index 4f7dcd3864e8..3b2b1bbc70ae 100644 --- a/website/src/pages/ko/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/ko/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Deploying a Subgraph to Multiple Networks +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 
If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## Deploying the subgraph to multiple networks +## Deploying the Subgraph to multiple networks -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### Using `graph-cli` @@ -20,7 +21,7 @@ Options: --network-file Networks config file path (default: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. @@ -54,7 +55,7 @@ If you don't have a `networks.json` file, you'll need to manually create one wit > Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file path/to/config One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. 
You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ A working example of this can be found [here](https://github.com/graphprotocol/e **Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. -## Subgraph Studio subgraph archive policy +## Subgraph Studio Subgraph archive policy -A subgraph version in Studio is archived if and only if it meets the following criteria: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - The version is not published to the network (or pending publish) - The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days +- The Subgraph hasn't been queried in 30 days -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -Every subgraph affected with this policy has an option to bring the version in question back. +Every Subgraph affected with this policy has an option to bring the version in question back. -## Checking subgraph health +## Checking Subgraph health -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. 
The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Node exposes a GraphQL endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. From 80e1e66ec153dde7f78e87a8da73ce692574c26a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:58 -0500 Subject: [PATCH 0833/1789] New translations multiple-networks.mdx (Dutch) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/nl/subgraphs/developing/deploying/multiple-networks.mdx index 4f7dcd3864e8..3b2b1bbc70ae 100644 --- a/website/src/pages/nl/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/nl/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Deploying a Subgraph to Multiple Networks +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## Deploying the subgraph to multiple networks +## Deploying the Subgraph to multiple networks -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. 
+In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### Using `graph-cli` @@ -20,7 +21,7 @@ Options: --network-file Networks config file path (default: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. @@ -54,7 +55,7 @@ If you don't have a `networks.json` file, you'll need to manually create one wit > Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file path/to/config One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ A working example of this can be found [here](https://github.com/graphprotocol/e **Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. 
-This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. -## Subgraph Studio subgraph archive policy +## Subgraph Studio Subgraph archive policy -A subgraph version in Studio is archived if and only if it meets the following criteria: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - The version is not published to the network (or pending publish) - The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days +- The Subgraph hasn't been queried in 30 days -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -Every subgraph affected with this policy has an option to bring the version in question back. +Every Subgraph affected with this policy has an option to bring the version in question back. -## Checking subgraph health +## Checking Subgraph health -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). 
Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Node exposes a GraphQL endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. From fc05aea510ba22a6d42cb771b109f97f0486f54c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:22:59 -0500 Subject: [PATCH 0834/1789] New translations multiple-networks.mdx (Polish) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/pl/subgraphs/developing/deploying/multiple-networks.mdx index 4f7dcd3864e8..3b2b1bbc70ae 100644 --- a/website/src/pages/pl/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/pl/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Deploying a Subgraph to Multiple Networks +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## Deploying the subgraph to multiple networks +## Deploying the Subgraph to multiple networks -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### Using `graph-cli` @@ -20,7 +21,7 @@ Options: --network-file Networks config file path (default: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. 
> Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. @@ -54,7 +55,7 @@ If you don't have a `networks.json` file, you'll need to manually create one wit > Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file path/to/config One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ A working example of this can be found [here](https://github.com/graphprotocol/e **Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. 
In this case, you can check the `fatalError` field for details on this error. -## Subgraph Studio subgraph archive policy +## Subgraph Studio Subgraph archive policy -A subgraph version in Studio is archived if and only if it meets the following criteria: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - The version is not published to the network (or pending publish) - The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days +- The Subgraph hasn't been queried in 30 days -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -Every subgraph affected with this policy has an option to bring the version in question back. +Every Subgraph affected with this policy has an option to bring the version in question back. -## Checking subgraph health +## Checking Subgraph health -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Node exposes a GraphQL endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. 
In this case, you can check the `fatalError` field for details on this error. From 5ea0ace2ae6c57ea49ae5f36e6ea55337f15dc7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:00 -0500 Subject: [PATCH 0835/1789] New translations multiple-networks.mdx (Portuguese) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/pt/subgraphs/developing/deploying/multiple-networks.mdx index 7164b6d5a83c..29fe7bcfd125 100644 --- a/website/src/pages/pt/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/pt/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Deploying a Subgraph to Multiple Networks +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## Como lançar o subgraph a várias redes +## Deploying the Subgraph to multiple networks -Em alguns casos, irá querer lançar o mesmo subgraph a várias redes sem duplicar o seu código completo. O grande desafio nisto é que os endereços de contrato nestas redes são diferentes. +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### Using `graph-cli` @@ -20,7 +21,7 @@ Options: --network-file Networks config file path (default: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. @@ -54,7 +55,7 @@ If you don't have a `networks.json` file, you'll need to manually create one wit > Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... 
@@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file local/do/config ``` -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file local/do/config One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). -Por exemplo, vamos supor que um subgraph deve ser lançado à mainnet e à Sepolia, através de diferentes endereços de contratos. Então, seria possível definir dois arquivos de config ao fornecer os endereços para cada rede: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -Para lançar este subgraph à mainnet ou à Sepolia, apenas um dos seguintes comandos precisaria ser executado: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ A working example of this can be found [here](https://github.com/graphprotocol/e **Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. -## Política de arqivamento do Subgraph Studio +## Subgraph Studio Subgraph archive policy -Uma versão de subgraph no Studio é arquivada se, e apenas se, atender aos seguintes critérios: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - A versão não foi publicada na rede (ou tem a publicação pendente) - A versão foi criada há 45 dias ou mais -- O subgraph não foi consultado em 30 dias +- The Subgraph hasn't been queried in 30 days -Além disto, quando uma nova versão é editada, se o subgraph ainda não foi publicado, então a versão N-2 do subgraph é arquivada. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. 
-Todos os subgraphs afetados por esta política têm a opção de trazer de volta a versão em questão. +Every Subgraph affected with this policy has an option to bring the version in question back. -## Como conferir a saúde do subgraph +## Checking Subgraph health -Se um subgraph for sincronizado com sucesso, isto indica que ele continuará a rodar bem para sempre. Porém, novos gatilhos na rede podem revelar uma condição de erro não testada, ou ele pode começar a se atrasar por problemas de desempenho ou com os operadores de nodes. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Node exposes a GraphQL endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. 
From 5b0ecfedf7576cd95ed4fabe3746c60e08e5327a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:01 -0500 Subject: [PATCH 0836/1789] New translations multiple-networks.mdx (Russian) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/ru/subgraphs/developing/deploying/multiple-networks.mdx index 8ae55fbd8bcc..4f15c642b820 100644 --- a/website/src/pages/ru/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/ru/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Развертывание субграфа в нескольких сетях +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## Развертывание подграфа в нескольких сетях +## Deploying the Subgraph to multiple networks -В некоторых случаях вы захотите развернуть один и тот же подграф в нескольких сетях, не дублируя весь его код. Основная проблема, возникающая при этом, заключается в том, что адреса контрактов в этих сетях разные. +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### Использование `graph-cli` @@ -19,7 +20,7 @@ This page explains how to deploy a subgraph to multiple networks. To deploy a su --network-file Путь к файлу конфигурации сетей (по умолчанию: "./networks.json") ``` -Вы можете использовать опцию `--network` для указания конфигурации сети из стандартного файла `json` (по умолчанию используется `networks.json`), чтобы легко обновлять свой субграф во время разработки. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Примечание: Команда `init` теперь автоматически сгенерирует `networks.json` на основе предоставленной информации. Затем Вы сможете обновить существующие или добавить дополнительные сети. @@ -53,7 +54,7 @@ This page explains how to deploy a subgraph to multiple networks. To deploy a su > Примечание: Вам не нужно указывать ни один из `templates` (если они у Вас есть) в файле конфигурации, только `dataSources`. Если есть какие-либо `templates`, объявленные в файле `subgraph.yaml`, их сеть будет автоматически обновлена до указанной с помощью опции `--network`. -Теперь давайте предположим, что Вы хотите иметь возможность развернуть свой субграф в сетях `mainnet` и `sepolia`, и это Ваш `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... 
@@ -95,7 +96,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -Команда `build` обновит Ваш `subgraph.yaml` конфигурацией `sepolia`, а затем повторно скомпилирует субграф. Ваш файл `subgraph.yaml` теперь должен выглядеть следующим образом: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -126,7 +127,7 @@ yarn deploy --network sepolia --network-file path/to/config Одним из способов параметризации таких аспектов, как адреса контрактов, с использованием старых версий `graph-cli` является генерация его частей с помощью системы шаблонов, такой как [Mustache](https://mustache.github.io/) или [Handlebars](https://handlebarsjs.com/). -Чтобы проиллюстрировать этот подход, давайте предположим, что субграф должен быть развернут в майннете и в сети Sepolia с использованием разных адресов контракта. Затем Вы можете определить два файла конфигурации, содержащие адреса для каждой сети: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: ```json { @@ -178,7 +179,7 @@ dataSources: } ``` -Чтобы развернуть этот субграф для основной сети или сети Sepolia, Вам нужно просто запустить одну из двух следующих команд: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -192,25 +193,25 @@ yarn prepare:sepolia && yarn deploy **Примечание:** Этот подход также можно применять в более сложных ситуациях, когда необходимо заменить не только адреса контрактов и сетевые имена, но и сгенерировать мэппинги или ABI из шаблонов. -Это предоставит Вам `chainHeadBlock`, который Вы сможете сравнить с `latestBlock` своего субграфа, чтобы проверить, не отстает ли он. `synced` сообщает, попал ли субграф в чейн. `health` в настоящее время может принимать значения `healthy`, если ошибки отсутствуют, или `failed`, если произошла ошибка, остановившая работу субграфа. В этом случае Вы можете проверить поле `fatalError` для получения подробной информации об этой ошибке. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. -## Политика архивирования подграфов в Subgraph Studio +## Subgraph Studio Subgraph archive policy -Версия субграфа в Studio архивируется, если и только если выполняются следующие критерии: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - Версия не опубликована в сети (или ожидает публикации) - Версия была создана 45 или более дней назад -- Субграф не запрашивался в течение 30 дней +- The Subgraph hasn't been queried in 30 days -Кроме того, когда развертывается новая версия, если субграф не был опубликован, то версия N-2 субграфа архивируется. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -У каждого подграфа, затронутого этой политикой, есть возможность вернуть соответствующую версию обратно. 
+Every Subgraph affected with this policy has an option to bring the version in question back. -## Проверка работоспособности подграфа +## Checking Subgraph health -Если подграф успешно синхронизируется, это хороший признак того, что он будет работать надёжно. Однако новые триггеры в сети могут привести к тому, что ваш подграф попадет в состояние непроверенной ошибки, или он может начать отставать из-за проблем с производительностью или проблем с операторами нод. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node предоставляет конечную точку GraphQL, которую Вы можете запросить для проверки статуса своего субграфа. В хостинговом сервисе он доступен по адресу `https://api.thegraph.com/index-node/graphql`. На локальной ноде он по умолчанию доступен через порт `8030/graphql`. Полную схему для этой конечной точки можно найти [здесь](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Вот пример запроса, проверяющего состояние текущей версии субграфа: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -237,4 +238,4 @@ Graph Node предоставляет конечную точку GraphQL, ко } ``` -Это предоставит Вам `chainHeadBlock`, который Вы сможете сравнить с `latestBlock` своего субграфа, чтобы проверить, не отстает ли он. `synced` сообщает, попал ли субграф в чейн. `health` в настоящее время может принимать значения `healthy`, если ошибки отсутствуют, или `failed`, если произошла ошибка, остановившая работу субграфа. В этом случае Вы можете проверить поле `fatalError` для получения подробной информации об этой ошибке. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. 
From 242446c5bda4adda989b97693e64916a083cf8b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:02 -0500 Subject: [PATCH 0837/1789] New translations multiple-networks.mdx (Swedish) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/sv/subgraphs/developing/deploying/multiple-networks.mdx index 8be847bc8fab..b45b0701bfdd 100644 --- a/website/src/pages/sv/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/sv/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Deploying a Subgraph to Multiple Networks +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## Distribuera undergrafen till flera nätverk +## Deploying the Subgraph to multiple networks -I vissa fall vill du distribuera samma undergraf till flera nätverk utan att duplicera all dess kod. Den största utmaningen med detta är att kontraktsadresserna på dessa nätverk är olika. +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### Using `graph-cli` @@ -20,7 +21,7 @@ Options: --network-file Networks config file path (default: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. @@ -54,7 +55,7 @@ If you don't have a `networks.json` file, you'll need to manually create one wit > Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. 
Your `subgraph.yaml` file now should look like this: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file path/to/config One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ A working example of this can be found [here](https://github.com/graphprotocol/e **Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. -## Subgraph Studio subgraf arkivpolitik +## Subgraph Studio Subgraph archive policy -A subgraph version in Studio is archived if and only if it meets the following criteria: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - The version is not published to the network (or pending publish) - The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days +- The Subgraph hasn't been queried in 30 days -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -Varje subgraf som påverkas av denna policy har en möjlighet att ta tillbaka versionen i fråga. +Every Subgraph affected with this policy has an option to bring the version in question back. 
-## Kontroll av undergrafens hälsa +## Checking Subgraph health -Om en subgraf synkroniseras framgångsrikt är det ett gott tecken på att den kommer att fortsätta att fungera bra för alltid. Nya triggers i nätverket kan dock göra att din subgraf stöter på ett otestat feltillstånd eller så kan den börja halka efter på grund av prestandaproblem eller problem med nodoperatörerna. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Node exposes a GraphQL endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. From a5634c2b836c1f9d126220848b15127fe85dae89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:03 -0500 Subject: [PATCH 0838/1789] New translations multiple-networks.mdx (Turkish) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/tr/subgraphs/developing/deploying/multiple-networks.mdx index 2241675eac10..d401f6ad16b2 100644 --- a/website/src/pages/tr/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/tr/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Bir Subgraph'i Birden Fazla Ağda Dağıtma +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. 
To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## Subgraph'i Birden Fazla Ağda Dağıtma +## Deploying the Subgraph to multiple networks -Bazı durumlarda, tüm kodu tekrarlamak zorunda olmadan aynı subgraph'i birden fazla ağda yayına almak isteyebilirsiniz. Bunu yapmaktaki temel zorluk, sözleşme kodu tamamen aynı olsa dahi, farklı ağlardaki sözleşme adreslerinin farklı olmasıdır. +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### `graph-cli` Kullanarak @@ -20,7 +21,7 @@ Seçenekler: --network-file Ağ yapılandırma dosya yolu (varsayılan: "./networks.json") ``` -`--network` seçeneğini, geliştirme sırasında subgraph'inizi kolayca güncellemek amacıyla, bir `json` standart dosyası kullanarak bir ağ yapılandırması belirlemek için kullanabilirsiniz. (Varsayılan olarak `networks.json` dosyasını kullanır.) +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Not: Artık, `init` komutu, sağlanan bilgilere dayanarak otomatik olarak bir `networks.json` dosyası oluşturmaktadır. Daha sonra mevcut ağları güncelleyebilir veya yeni ağlar ekleyebilirsiniz. @@ -54,7 +55,7 @@ Eğer bir `networks.json` dosyanız yoksa, aşağıdaki yapı ile manuel olarak > Not: Yapılandırma dosyasında `templates` (şablonlar, eğer varsa) kısmını doldurmanıza gerek yoktur, yalnızca `dataSources` (veri kaynaklarını) belirtmelisiniz. Eğer `subgraph.yaml` dosyasında `templates` kısmı tanımlanmışsa, bunların ağı `--network` seçeneği ile belirtilen ağa otomatik olarak güncellenecektir. -Şimdi, subgraph'inizi `mainnet` ve `sepolia` ağlarında dağıtmak istediğinizi varsayalım ve `subgraph.yaml` dosyanız aşağıdaki gibi olsun\`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -`build` komutu, `subgraph.yaml` dosyanızı `sepolia` yapılandırmasıyla güncelleyip ardından subgraph'i yeniden derleyecektir. `subgraph.yaml` dosyanız artık şöyle görünmelidir: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file path/to/config Daha eski `graph-cli` sürümlerini kullanarak kontrat adresleri gibi unsurları parametrize etmenin bir yolu, bunların bir kısmını [Mustache](https://mustache.github.io/) veya [Handlebar](https://handlebarsjs.com/) gibi bir şablonlama sistemiyle oluşturmaktır. -Bu yaklaşımı açıklamak için, bir subgraph'in mainnet ve Sepolia ağlarına farklı sözleşme adresleri ile dağıtılması gerektiğini varsayalım. 
Her ağ için adresleri sağlayan iki yapılandırma dosyası tanımlayabilirsiniz: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ Her iki ağ için de bir manifesto oluşturmak amacıyla, `package.json` dosyas } ``` -Bu subgraph'i mainnet veya Sepolia üzerinde yayına almak için artık aşağıdaki iki komuttan birini çalıştırabilirsiniz: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ Bunun çalışan bir örneğini [burada](https://github.com/graphprotocol/exampl **Not**: Bu yaklaşım, sözleşme adresleri ve ağ adlarının ötesinde daha fazla değişiklik yapmanın, veya şablonlardan mapping ya da ABI'ler oluşturmanın gerekli olduğu, daha karmaşık durumlara da uygulanabilir. -Bu işlem, subgraph'inizin geride kalıp kalmadığını kontrol etmek için `chainHeadBlock` değerini subgraph'inizdeki `latestBlock` ile karşılaştırmanızı sağlar. `synced`, subgraph'in zincire daha önce hiç yetişip yetişmediğini belirtir. `health` ise şu anda hata olmadığında `healthy` ve bir hata nedeniyle subgraph'in ilerlemesi durduğunda `failed` değerlerini alabilir. Bu durumda, hataya dair ayrıntılar için `fatalError` alanını kontrol edebilirsiniz. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. -## Subgraph Studio Subgraph Arşivleme Politikası +## Subgraph Studio Subgraph archive policy -Studio’daki bir subgraph sürümü yalnızca aşağıdaki kriterleri karşılaması durumunda arşivlenir: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - Sürüm ağa yayımlanmamıştır (veya yayım askıda kalmıştır) - Sürüm, 45 gün veya daha uzun bir süre önce oluşturulmuştur -- Subgraph son 30 gündür sorgulanmamıştır +- The Subgraph hasn't been queried in 30 days -Ek olarak, yeni bir sürüm yayına alındığında, eğer subgraph yayımlanmadıysa, subgraph’in N-2 sürümü arşivlenir. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -Bu politika kapsamında etkilenen her subgraph, ilgili sürümü geri getirme seçeneğine sahiptir. +Every Subgraph affected with this policy has an option to bring the version in question back. -## Subgraph durumunu kontrol etme +## Checking Subgraph health -Bir subgraph'in başarıyla senkronize olması, sonsuza kadar sorunsuz çalışmaya devam edeceğine dair iyi bir işarettir. Ancak, ağdaki yeni tetikleyiciler subgraph'inizin test edilmemiş bir hata durumuna düşmesine neden olabilir, veya performans sorunları ya da düğüm operatörlerindeki sorunlar nedeniyle subgraph geride kalmaya başlayabilir. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. 
-Graph Düğümü, subgraph'inizin durumunu kontrol etmek için sorgu yapabileceğiniz bir GraphQL uç noktası sunar. Sağlayıcı hizmetinde bu uç nokta `https://api.thegraph.com/index-node/graphql` adresinde bulunmaktadır. Yerel bir düğümde ise varsayılan olarak `8030/graphql` portunda erişilebilir. Bu uç noktanın tam şemasına [buradan](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql) ulaşabilirsiniz. İşte bir subgraph'in güncel sürümünün durumunu kontrol eden örnek bir sorgu: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Düğümü, subgraph'inizin durumunu kontrol etmek için sorgu yapabilece } ``` -Bu işlem, subgraph'inizin geride kalıp kalmadığını kontrol etmek için `chainHeadBlock` değerini subgraph'inizdeki `latestBlock` ile karşılaştırmanızı sağlar. `synced`, subgraph'in zincire daha önce hiç yetişip yetişmediğini belirtir. `health` ise şu anda hata olmadığında `healthy` ve bir hata nedeniyle subgraph'in ilerlemesi durduğunda `failed` değerlerini alabilir. Bu durumda, hataya dair ayrıntılar için `fatalError` alanını kontrol edebilirsiniz. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. From da9b9744833217975dfde0cd7bc07139eeb09ad3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:04 -0500 Subject: [PATCH 0839/1789] New translations multiple-networks.mdx (Ukrainian) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/uk/subgraphs/developing/deploying/multiple-networks.mdx index 4f7dcd3864e8..3b2b1bbc70ae 100644 --- a/website/src/pages/uk/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/uk/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Deploying a Subgraph to Multiple Networks +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). 
-## Deploying the subgraph to multiple networks +## Deploying the Subgraph to multiple networks -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### Using `graph-cli` @@ -20,7 +21,7 @@ Options: --network-file Networks config file path (default: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. @@ -54,7 +55,7 @@ If you don't have a `networks.json` file, you'll need to manually create one wit > Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file path/to/config One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. 
You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ A working example of this can be found [here](https://github.com/graphprotocol/e **Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. -## Subgraph Studio subgraph archive policy +## Subgraph Studio Subgraph archive policy -A subgraph version in Studio is archived if and only if it meets the following criteria: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - The version is not published to the network (or pending publish) - The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days +- The Subgraph hasn't been queried in 30 days -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -Every subgraph affected with this policy has an option to bring the version in question back. +Every Subgraph affected with this policy has an option to bring the version in question back. -## Checking subgraph health +## Checking Subgraph health -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. 
The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Node exposes a GraphQL endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. From 5aafe8ddf03c9da11805b90583f0839ebdeb869b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:05 -0500 Subject: [PATCH 0840/1789] New translations multiple-networks.mdx (Chinese Simplified) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/zh/subgraphs/developing/deploying/multiple-networks.mdx index 3608f13cb405..d86c9dc5e45e 100644 --- a/website/src/pages/zh/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/zh/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Deploying a Subgraph to Multiple Networks +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## 将子图部署到多个网络 +## Deploying the Subgraph to multiple networks -在某些情况下,您需要将相同的子图部署到多个网络,而不复制其所有代码。随之而来的主要挑战是这些网络上的合约地址不同。 +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. 
### Using `graph-cli` @@ -20,7 +21,7 @@ Options: --network-file Networks config file path (default: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. @@ -54,7 +55,7 @@ If you don't have a `networks.json` file, you'll need to manually create one wit > Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file path/to/config One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ A working example of this can be found [here](https://github.com/graphprotocol/e **Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. 
In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. -## 子图工作室子图封存策略 +## Subgraph Studio Subgraph archive policy -A subgraph version in Studio is archived if and only if it meets the following criteria: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - The version is not published to the network (or pending publish) - The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days +- The Subgraph hasn't been queried in 30 days -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -受此策略影响的每个子图都有一个选项,可以回复有问题的版本。 +Every Subgraph affected with this policy has an option to bring the version in question back. -## 检查子图状态 +## Checking Subgraph health -如果子图成功同步,这是一个好信号,表明它将永远运行良好。然而,网络上的新触发器可能会导致子图遇到未经测试的错误条件,或者由于性能问题或节点操作符的问题,子图开始落后。 +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Node exposes a GraphQL endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. 
`health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. From f7f1b531ffba196f8d2e37d88fb1ddbf378dc6b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:06 -0500 Subject: [PATCH 0841/1789] New translations multiple-networks.mdx (Urdu (Pakistan)) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/ur/subgraphs/developing/deploying/multiple-networks.mdx index 0f23c9bdb044..018d2eb471e7 100644 --- a/website/src/pages/ur/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/ur/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Deploying a Subgraph to Multiple Networks +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## سب گراف کو متعدد نیٹ ورکس پر تعینات کرنا +## Deploying the Subgraph to multiple networks -کچھ معاملات میں، آپ ایک ہی سب گراف کو متعدد نیٹ ورکس پر اس کے تمام کوڈ کی نقل کیے بغیر تعینات کرنا چاہیں گے۔ اس کے ساتھ آنے والا بنیادی چیلنج یہ ہے کہ ان نیٹ ورکس پر کنٹریکٹ ایڈریس مختلف ہیں. +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### Using `graph-cli` @@ -20,7 +21,7 @@ Options: --network-file Networks config file path (default: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. @@ -54,7 +55,7 @@ If you don't have a `networks.json` file, you'll need to manually create one wit > Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... 
@@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file path/to/config One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ A working example of this can be found [here](https://github.com/graphprotocol/e **Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. -## سب گراف سٹوڈیو سب گراف آرکائیو پالیسی +## Subgraph Studio Subgraph archive policy -A subgraph version in Studio is archived if and only if it meets the following criteria: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - The version is not published to the network (or pending publish) - The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days +- The Subgraph hasn't been queried in 30 days -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. 
-اس پالیسی سے متاثر ہونے والے ہر سب گراف کے پاس زیر بحث ورژن کو واپس لانے کا اختیار ہے. +Every Subgraph affected with this policy has an option to bring the version in question back. -## سب گراف کی صحت کی جانچ کرنا +## Checking Subgraph health -اگر ایک سب گراف کامیابی کے ساتھ مطابقت پذیر ہوتا ہے، تو یہ ایک اچھی علامت ہے کہ یہ ہمیشہ کے لیے اچھی طرح چلتا رہے گا۔ تاہم، نیٹ ورک پر نئے محرکات آپ کے سب گراف کو بغیر جانچ کی خرابی کی حالت کو نشانہ بنا سکتے ہیں یا کارکردگی کے مسائل یا نوڈ آپریٹرز کے ساتھ مسائل کی وجہ سے یہ پیچھے پڑنا شروع کر سکتا ہے. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Node exposes a GraphQL endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. 
From 4d0a28998bbd277ebbbffd87b96ba32e9246e19c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:07 -0500 Subject: [PATCH 0842/1789] New translations multiple-networks.mdx (Vietnamese) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/vi/subgraphs/developing/deploying/multiple-networks.mdx index 4f7dcd3864e8..3b2b1bbc70ae 100644 --- a/website/src/pages/vi/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/vi/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Deploying a Subgraph to Multiple Networks +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## Deploying the subgraph to multiple networks +## Deploying the Subgraph to multiple networks -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### Using `graph-cli` @@ -20,7 +21,7 @@ Options: --network-file Networks config file path (default: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. @@ -54,7 +55,7 @@ If you don't have a `networks.json` file, you'll need to manually create one wit > Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. 
Your `subgraph.yaml` file now should look like this: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file path/to/config One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ A working example of this can be found [here](https://github.com/graphprotocol/e **Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. -## Subgraph Studio subgraph archive policy +## Subgraph Studio Subgraph archive policy -A subgraph version in Studio is archived if and only if it meets the following criteria: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - The version is not published to the network (or pending publish) - The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days +- The Subgraph hasn't been queried in 30 days -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -Every subgraph affected with this policy has an option to bring the version in question back. +Every Subgraph affected with this policy has an option to bring the version in question back. 
-## Checking subgraph health +## Checking Subgraph health -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Node exposes a GraphQL endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. From 44831c3cd9c0e14c54075423ce6a6d07651f2368 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:08 -0500 Subject: [PATCH 0843/1789] New translations multiple-networks.mdx (Marathi) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/mr/subgraphs/developing/deploying/multiple-networks.mdx index 8d85033aeb01..3e34f743a6c0 100644 --- a/website/src/pages/mr/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/mr/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: Deploying a Subgraph to Multiple Networks +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. 
To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## एकाधिक नेटवर्कवर सबग्राफ तैनात करणे +## Deploying the Subgraph to multiple networks -काही प्रकरणांमध्ये, तुम्हाला समान सबग्राफ एकाधिक नेटवर्कवर त्याच्या कोडची नक्कल न करता उपयोजित करायचा असेल. यासह येणारे मुख्य आव्हान हे आहे की या नेटवर्कवरील कराराचे पत्ते वेगळे आहेत. +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. ### Using `graph-cli` @@ -20,7 +21,7 @@ Options: --network-file Networks config file path (default: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. @@ -54,7 +55,7 @@ If you don't have a `networks.json` file, you'll need to manually create one wit > Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -96,7 +97,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -127,7 +128,7 @@ yarn deploy --network sepolia --network-file path/to/config One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. 
You could then define two config files providing the addresses for each network: ```json { @@ -179,7 +180,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh # Mainnet: @@ -193,25 +194,25 @@ A working example of this can be found [here](https://github.com/graphprotocol/e **Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. -## सबग्राफ स्टुडिओ सबग्राफ संग्रहण धोरण +## Subgraph Studio Subgraph archive policy -A subgraph version in Studio is archived if and only if it meets the following criteria: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - The version is not published to the network (or pending publish) - The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days +- The Subgraph hasn't been queried in 30 days -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -या धोरणामुळे प्रभावित झालेल्या प्रत्येक सबग्राफला प्रश्नातील आवृत्ती परत आणण्याचा पर्याय आहे. +Every Subgraph affected with this policy has an option to bring the version in question back. -## सबग्राफ आरोग्य तपासत आहे +## Checking Subgraph health -जर सबग्राफ यशस्वीरित्या समक्रमित झाला, तर ते कायमचे चांगले चालत राहण्याचे चांगले चिन्ह आहे. तथापि, नेटवर्कवरील नवीन ट्रिगर्समुळे तुमच्या सबग्राफची चाचणी न केलेली त्रुटी स्थिती येऊ शकते किंवा कार्यप्रदर्शन समस्यांमुळे किंवा नोड ऑपरेटरमधील समस्यांमुळे ते मागे पडू शकते. +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. 
The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -238,4 +239,4 @@ Graph Node exposes a GraphQL endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. From 691215e59b2f5ef6f22c306794dd5d6d1af60d73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:09 -0500 Subject: [PATCH 0844/1789] New translations multiple-networks.mdx (Hindi) --- .../deploying/multiple-networks.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/hi/subgraphs/developing/deploying/multiple-networks.mdx index 3e03014aba51..d10ef9160dc6 100644 --- a/website/src/pages/hi/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/hi/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,12 +1,13 @@ --- title: मल्टीपल नेटवर्क्स पर एक Subgraph डिप्लॉय करना +sidebarTitle: Deploying to Multiple Networks --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). -## सबग्राफ को कई नेटवर्क पर तैनात करना +## Deploying the Subgraph to multiple networks -कुछ मामलों में, आप एक ही सबग्राफ को इसके सभी कोड को डुप्लिकेट किए बिना कई नेटवर्क पर तैनात करना चाहेंगे। इसके साथ आने वाली मुख्य चुनौती यह है कि इन नेटवर्कों पर अनुबंध के पते अलग-अलग हैं। +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. 
The main challenge that comes with this is that the contract addresses on these networks are different. ### graph-cli का उपयोग करते हुए @@ -21,7 +22,7 @@ This page explains how to deploy a subgraph to multiple networks. To deploy a su ``` -आप --network विकल्प का उपयोग करके एक नेटवर्क कॉन्फ़िगरेशन को एक json मानक फ़ाइल (डिफ़ॉल्ट रूप से networks.json) से निर्दिष्ट कर सकते हैं ताकि विकास के दौरान आसानी से अपने subgraph को अपडेट किया जा सके +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. > ध्यान दें: init कमांड अब दी गई जानकारी के आधार पर एक networks.json को स्वचालित रूप से उत्पन्न करेगा। इसके बाद आप मौजूदा नेटवर्क को अपडेट कर सकेंगे या अतिरिक्त नेटवर्क जोड़ सकेंगे। @@ -55,7 +56,7 @@ This page explains how to deploy a subgraph to multiple networks. To deploy a su > ध्यान दें: आपको किसी भी 'templates' (यदि आपके पास कोई है) को config फ़ाइल में निर्दिष्ट करने की आवश्यकता नहीं है, केवल 'dataSources' को। यदि 'subgraph.yaml' फ़ाइल में कोई 'templates' घोषित किए गए हैं, तो उनका नेटवर्क स्वचालित रूप से उस नेटवर्क में अपडेट हो जाएगा जो 'network' विकल्प के साथ निर्दिष्ट किया गया है। -मान लीजिए कि आप अपने subgraph को mainnet और sepolia नेटवर्क पर डिप्लॉय करना चाहते हैं, और यह आपका subgraph.yaml है: +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: ```yaml # ... @@ -97,7 +98,7 @@ yarn build --network sepolia yarn build --network sepolia --network-file path/to/config ``` -build कमांड आपके subgraph.yaml को sepolia कॉन्फ़िगरेशन के साथ अपडेट करेगा और फिर से subgraph को पुनः-कंपाइल करेगा। आपका subgraph.yaml फ़ाइल अब इस प्रकार दिखना चाहिए: +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: ```yaml # ... @@ -128,7 +129,7 @@ yarn deploy --network sepolia --network-file path/to/config एक तरीका है 'graph-cli' के पुराने संस्करणों का उपयोग करके अनुबंध पते जैसी विशेषताओं को पैरामीटरित करना, जो कि एक टेम्पलेटिंग सिस्टम जैसे Mustache (https://mustache.github.io/) या Handlebars (https://handlebarsjs.com/) के साथ इसके कुछ हिस्सों को जनरेट करना है। -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. 
You could then define two config files providing the addresses for each network: ```json { @@ -180,7 +181,7 @@ dataSources: } ``` -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: ```sh मेननेट: @@ -194,25 +195,25 @@ yarn prepare && yarn deploy यह दृष्टिकोण अधिक जटिल परिस्थितियों में भी लागू किया जा सकता है, जहां अनुबंध पते और नेटवर्क नामों के अलावा अधिक को प्रतिस्थापित करने की आवश्यकता होती है या जहां टेम्पलेट से मैपिंग या ABIs उत्पन्न करने की आवश्यकता होती है। -यह आपको chainHeadBlock देगा जिसे आप अपने subgraph पर latestBlock के साथ तुलना कर सकते हैं यह जाँचने के लिए कि क्या यह पीछे चल रहा है। synced यह बताता है कि क्या subgraph कभी श्रृंखला के साथ मेल खा गया है। health वर्तमान में दो मान ले सकता है: healthy अगर कोई त्रुटियाँ नहीं हुई हैं, या failed अगर कोई त्रुटि हुई है जिसने subgraph की प्रगति को रोक दिया है। इस स्थिति में, आप इस त्रुटि के विवरण के लिए fatalError फ़ील्ड की जांच कर सकते हैं। +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. -## सबग्राफ स्टूडियो सबग्राफ संग्रह नीति +## Subgraph Studio Subgraph archive policy -A subgraph version in Studio is archived if and only if it meets the following criteria: +A Subgraph version in Studio is archived if and only if it meets the following criteria: - The version is not published to the network (or pending publish) - The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days +- The Subgraph hasn't been queried in 30 days -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. -इस नीति से प्रभावित प्रत्येक सबग्राफ के पास विचाराधीन संस्करण को वापस लाने का विकल्प है। +Every Subgraph affected with this policy has an option to bring the version in question back. -## सबग्राफ स्वास्थ्य की जाँच करना +## Checking Subgraph health -यदि एक सबग्राफ सफलतापूर्वक सिंक हो जाता है, तो यह एक अच्छा संकेत है कि यह हमेशा के लिए अच्छी तरह से चलता रहेगा। हालांकि, नेटवर्क पर नए ट्रिगर्स के कारण आपका सबग्राफ एक अनुपयोगी त्रुटि स्थिति में आ सकता है या यह प्रदर्शन समस्याओं या नोड ऑपरेटरों के साथ समस्याओं के कारण पीछे पड़ना शुरू हो सकता है। +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. 
-Graph Node एक GraphQL endpoint को उजागर करता है जिसे आप अपने subgraph की स्थिति की जांच करने के लिए क्वेरी कर सकते हैं। होस्टेड सेवा पर, यह https://api.thegraph.com/index-node/graphql पर उपलब्ध है। एक स्थानीय नोड पर, यह डिफ़ॉल्ट रूप से पोर्ट 8030/graphql पर उपलब्ध है। इस endpoint के लिए पूरा स्कीमा यहां (https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql) पाया जा सकता है। यहां एक उदाहरण क्वेरी है जो एक subgraph के वर्तमान संस्करण की स्थिति की जांच करती है: +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a Subgraph: ```graphql { @@ -239,4 +240,4 @@ Graph Node एक GraphQL endpoint को उजागर करता है } ``` -यह आपको chainHeadBlock देगा जिसे आप अपने subgraph पर latestBlock के साथ तुलना कर सकते हैं यह जाँचने के लिए कि क्या यह पीछे चल रहा है। synced यह बताता है कि क्या subgraph कभी श्रृंखला के साथ मेल खा गया है। health वर्तमान में दो मान ले सकता है: healthy अगर कोई त्रुटियाँ नहीं हुई हैं, या failed अगर कोई त्रुटि हुई है जिसने subgraph की प्रगति को रोक दिया है। इस स्थिति में, आप इस त्रुटि के विवरण के लिए fatalError फ़ील्ड की जांच कर सकते हैं। +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. From 2298a74c86d0fef166f73b9fb77a1d90899b7285 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:10 -0500 Subject: [PATCH 0845/1789] New translations multiple-networks.mdx (Swahili) --- .../deploying/multiple-networks.mdx | 242 ++++++++++++++++++ 1 file changed, 242 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/developing/deploying/multiple-networks.mdx diff --git a/website/src/pages/sw/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/sw/subgraphs/developing/deploying/multiple-networks.mdx new file mode 100644 index 000000000000..3b2b1bbc70ae --- /dev/null +++ b/website/src/pages/sw/subgraphs/developing/deploying/multiple-networks.mdx @@ -0,0 +1,242 @@ +--- +title: Deploying a Subgraph to Multiple Networks +sidebarTitle: Deploying to Multiple Networks +--- + +This page explains how to deploy a Subgraph to multiple networks. To deploy a Subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a Subgraph already, see [Creating a Subgraph](/developing/creating-a-subgraph/). + +## Deploying the Subgraph to multiple networks + +In some cases, you will want to deploy the same Subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. + +### Using `graph-cli` + +Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options: + +```sh +Options: + + ... 
+ --network Network configuration to use from the networks config file + --network-file Networks config file path (default: "./networks.json") +``` + +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your Subgraph during development. + +> Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. + +If you don't have a `networks.json` file, you'll need to manually create one with the following structure: + +```json +{ + "network1": { // the network name + "dataSource1": { // the dataSource name + "address": "0xabc...", // the contract address (optional) + "startBlock": 123456 // the startBlock (optional) + }, + "dataSource2": { + "address": "0x123...", + "startBlock": 123444 + } + }, + "network2": { + "dataSource1": { + "address": "0x987...", + "startBlock": 123 + }, + "dataSource2": { + "address": "0xxyz..", + "startBlock": 456 + } + }, + ... +} +``` + +> Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. + +Now, let's assume you want to be able to deploy your Subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: + +```yaml +# ... +dataSources: + - kind: ethereum/contract + name: Gravity + network: mainnet + source: + address: '0x123...' + abi: Gravity + mapping: + kind: ethereum/events +``` + +This is what your networks config file should look like: + +```json +{ + "mainnet": { + "Gravity": { + "address": "0x123..." + } + }, + "sepolia": { + "Gravity": { + "address": "0xabc..." + } + } +} +``` + +Now we can run one of the following commands: + +```sh +# Using default networks.json file +yarn build --network sepolia + +# Using custom named file +yarn build --network sepolia --network-file path/to/config +``` + +The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the Subgraph. Your `subgraph.yaml` file now should look like this: + +```yaml +# ... +dataSources: + - kind: ethereum/contract + name: Gravity + network: sepolia + source: + address: '0xabc...' + abi: Gravity + mapping: + kind: ethereum/events +``` + +Now you are ready to `yarn deploy`. + +> Note: As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option: + +```sh +# Using default networks.json file +yarn deploy --network sepolia + +# Using custom named file +yarn deploy --network sepolia --network-file path/to/config +``` + +### Using subgraph.yaml template + +One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). + +To illustrate this approach, let's assume a Subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: + +```json +{ + "network": "mainnet", + "address": "0x123..." +} +``` + +and + +```json +{ + "network": "sepolia", + "address": "0xabc..." 
+} +``` + +Along with that, you would substitute the network name and addresses in the manifest with variable placeholders `{{network}}` and `{{address}}` and rename the manifest to e.g. `subgraph.template.yaml`: + +```yaml +# ... +dataSources: + - kind: ethereum/contract + name: Gravity + network: mainnet + network: {{network}} + source: + address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' + address: '{{address}}' + abi: Gravity + mapping: + kind: ethereum/events +``` + +In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`: + +```json +{ + ... + "scripts": { + ... + "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", + "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" + }, + "devDependencies": { + ... + "mustache": "^3.1.0" + } +} +``` + +To deploy this Subgraph for mainnet or Sepolia you would now simply run one of the two following commands: + +```sh +# Mainnet: +yarn prepare:mainnet && yarn deploy + +# Sepolia: +yarn prepare:sepolia && yarn deploy +``` + +A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). + +**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. + +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. + +## Subgraph Studio Subgraph archive policy + +A Subgraph version in Studio is archived if and only if it meets the following criteria: + +- The version is not published to the network (or pending publish) +- The version was created 45 or more days ago +- The Subgraph hasn't been queried in 30 days + +In addition, when a new version is deployed, if the Subgraph has not been published, then the N-2 version of the Subgraph is archived. + +Every Subgraph affected with this policy has an option to bring the version in question back. + +## Checking Subgraph health + +If a Subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your Subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. + +Graph Node exposes a GraphQL endpoint which you can query to check the status of your Subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). 
Here is an example query that checks the status of the current version of a Subgraph: + +```graphql +{ + indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { + synced + health + fatalError { + message + block { + number + hash + } + handler + } + chains { + chainHeadBlock { + number + } + latestBlock { + number + } + } + } +} +``` + +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your Subgraph to check if it is running behind. `synced` informs if the Subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the Subgraph. In this case, you can check the `fatalError` field for details on this error. From e7047ea92c77276a568835bdefeefc643335aa50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:11 -0500 Subject: [PATCH 0846/1789] New translations using-subgraph-studio.mdx (Romanian) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/ro/subgraphs/developing/deploying/using-subgraph-studio.mdx index 634c2700ba68..77d10212c770 100644 --- a/website/src/pages/ro/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/ro/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Learn how to deploy your subgraph to Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. ## Subgraph Studio Overview In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: -- View a list of subgraphs you've created -- Manage, view details, and visualize the status of a specific subgraph -- Create and manage your API keys for specific subgraphs +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Restrict your API keys to specific domains and allow only certain Indexers to query with them -- Create your subgraph -- Deploy your subgraph using The Graph CLI -- Test your subgraph in the playground environment -- Integrate your subgraph in staging using the development query URL -- Publish your subgraph to The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Manage your billing ## Install The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. - You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. -3. After you sign in, your unique deploy key will be displayed on your subgraph details page. 
- - The deploy key allows you to publish your subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. -> Important: You need an API key to query subgraphs +> Important: You need an API key to query Subgraphs ### How to Create a Subgraph in Subgraph Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### Subgraph Compatibility with The Graph Network -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/supported-networks/) -- Must not use any of the following features: - - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Initialize Your Subgraph -Once your subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -You can find the `` value on your subgraph details page in Subgraph Studio, see image below: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. +After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## Graph Auth -Before you can deploy your subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your subgraph details page. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Then, use the following command to authenticate from the CLI: @@ -91,11 +85,11 @@ graph auth ## Deploying a Subgraph -Once you are ready, you can deploy your subgraph to Subgraph Studio. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. 
-Use the following CLI command to deploy your subgraph: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ After running this command, the CLI will ask for a version label. ## Testing Your Subgraph -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. +After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Publish Your Subgraph -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versioning Your Subgraph with the CLI -If you want to update your subgraph, you can do the following: +If you want to update your Subgraph, you can do the following: - You can deploy a new version to Studio using the CLI (it will only be private at this point). - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). 
## Automatic Archiving of Subgraph Versions -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From 823b7116ebb64216bfa92f2563a5080ddee51be6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:12 -0500 Subject: [PATCH 0847/1789] New translations using-subgraph-studio.mdx (French) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/fr/subgraphs/developing/deploying/using-subgraph-studio.mdx index f4e354e2bb21..6677c532e9f7 100644 --- a/website/src/pages/fr/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/fr/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Déploiement en utilisant Subgraph Studio --- -Apprenez à déployer votre subgraph sur Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Remarque : lorsque vous déployez un subgraph, vous le transférez vers Subgraph Studio, où vous pourrez le tester. Il est important de se rappeler que le déploiement n'est pas la même chose que la publication. Lorsque vous publiez un subgraph, vous le publiez onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. ## Présentation de Subgraph Studio Dans [Subgraph Studio](https://thegraph.com/studio/), vous pouvez faire ce qui suit: -- Voir une liste des subgraphs que vous avez créés -- Gérer, voir les détails et visualiser l'état d'un subgraph spécifique -- Créez et gérez vos clés API pour des subgraphs spécifiques +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Limitez vos clés API à des domaines spécifiques et autorisez uniquement certains Indexers à les utiliser pour effectuer des requêtes -- Créer votre subgraph -- Déployer votre subgraph en utilisant The Graph CLI -- Tester votre subgraph dans l'environnement de test -- Intégrer votre subgraph en staging en utilisant l'URL de requête du développement -- Publier votre subgraph sur The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Gérer votre facturation ## Installer The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Ouvrez [Subgraph Studio](https://thegraph.com/studio/). 2. 
Connectez votre portefeuille pour vous connecter. - Vous pouvez le faire via MetaMask, Coinbase Wallet, WalletConnect ou Safe. -3. Après vous être connecté, votre clé de déploiement unique sera affichée sur la page des détails de votre subgraph. - - La clé de déploiement vous permet de publier vos subgraphs ou de gérer vos clés d'API et votre facturation. Elle est unique mais peut être régénérée si vous pensez qu'elle a été compromise. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. -> Important : Vous avez besoin d'une clé API pour interroger les subgraphs +> Important: You need an API key to query Subgraphs ### Comment créer un subgraph dans Subgraph Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### Compatibilité des subgraphs avec le réseau de The Graph -Pour être pris en charge par les Indexeurs sur The Graph Network, les subgraphs doivent : - -- Indexer un [réseau pris en charge](/supported-networks/) -- Ne doit utiliser aucune des fonctionnalités suivantes : - - ipfs.cat & ipfs.map - - Erreurs non fatales - - La greffe +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Initialisez votre Subgraph -Une fois que votre subgraph a été créé dans Subgraph Studio, vous pouvez initialiser son code via la CLI en utilisant cette commande : +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -Vous pouvez trouver la valeur `` sur la page des détails de votre subgraph dans Subgraph Studio, voir l'image ci-dessous : +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -Après avoir exécuté la commande `graph init`, ilvous sera demandé de saisir l'adresse du contrat, le réseau, et un ABI que vous souhaitez interroger. Cela générera un nouveau dossier sur votre machine locale avec quelques codes de base pour commencer à travailler sur votre subgraph. Vous pouvez ensuite finaliser votre subgraph pour vous assurer qu'il fonctionne comme prévu. +After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## Authentification The Graph -Avant de pouvoir déployer votre subgraph sur Subgraph Studio, vous devez vous connecter à votre compte via la CLI. Pour le faire, vous aurez besoin de votre clé de déploiement, que vous pouvez trouver sur la page des détails de votre subgraph. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Ensuite, utilisez la commande suivante pour vous authentifier depuis la CLI : @@ -91,11 +85,11 @@ graph auth ## Déploiement d'un Subgraph -Une fois prêt, vous pouvez déployer votre subgraph sur Subgraph Studio. 
+Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Déployer un subgraph avec la CLI le pousse vers le Studio, où vous pouvez le tester et mettre à jour les métadonnées. Cette action ne publiera pas votre subgraph sur le réseau décentralisé. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -Utilisez la commande CLI suivante pour déployer votre subgraph : +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ Après avoir exécuté cette commande, la CLI demandera une étiquette de versio ## Tester votre Subgraph -Après le déploiement, vous pouvez tester votre subgraph (soit dans Subgraph Studio, soit dans votre propre application, avec l'URL de requête du déploiement), déployer une autre version, mettre à jour les métadonnées, et publier sur [Graph Explorer](https://thegraph.com/explorer) lorsque vous êtes prêt. +After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Utilisez Subgraph Studio pour vérifier les journaux (logs) sur le tableau de bord et rechercher les erreurs éventuelles de votre subgraph. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Publiez votre subgraph -Afin de publier votre subgraph avec succès, consultez [publier un subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versionning de votre subgraph avec le CLI -Si vous souhaitez mettre à jour votre subgraph, vous pouvez faire ce qui suit : +If you want to update your Subgraph, you can do the following: - Vous pouvez déployer une nouvelle version dans Studio en utilisant la CLI (cette version sera privée à ce stade). - Une fois que vous en êtes satisfait, vous pouvez publier votre nouveau déploiement sur [Graph Explorer](https://thegraph.com/explorer). -- Cette action créera une nouvelle version de votre subgraph sur laquelle les Curateurs pourront commencer à signaler et que les Indexeurs pourront indexer. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -Vous pouvez également mettre à jour les métadonnées de votre subgraph sans publier de nouvelle version. Vous pouvez mettre à jour les détails de votre subgraph dans Studio (sous la photo de profil, le nom, la description, etc.) en cochant une option appelée **Mettre à jour les détails** dans [Graph Explorer](https://thegraph.com/explorer). Si cette option est cochée, une transaction onchain sera générée qui mettra à jour les détails du subgraph dans Explorer sans avoir à publier une nouvelle version avec un nouveau déploiement. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. 
-> Remarque : la publication d'une nouvelle version d'un subgraph sur le réseau entraîne des coûts. En plus des frais de transaction, vous devez également financer une partie de la taxe de curation sur le signal de migration automatique. Vous ne pouvez pas publier une nouvelle version de votre subgraph si les Curateurs ne l'ont pas signalé. Pour plus d'informations, veuillez lire la suite [ici](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## Archivage automatique des versions de subgraphs -Chaque fois que vous déployez une nouvelle version de subgraph dans Subgraph Studio, la version précédente sera archivée. Les versions archivées ne seront pas indexées/synchronisées et ne pourront donc pas être interrogées. Vous pouvez désarchiver une version de votre subgraph dans Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Remarque : les versions précédentes des subgraphs non publiés mais déployés dans Studio seront automatiquement archivées. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From 4acaed0ad3cf07520d8db7ca948a7205f0acdb57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:13 -0500 Subject: [PATCH 0848/1789] New translations using-subgraph-studio.mdx (Spanish) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/es/subgraphs/developing/deploying/using-subgraph-studio.mdx index 11e4e4c22495..29eed7358005 100644 --- a/website/src/pages/es/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/es/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Learn how to deploy your subgraph to Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. 
## Subgraph Studio Overview In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: -- View a list of subgraphs you've created -- Manage, view details, and visualize the status of a specific subgraph -- Crear y gestionar sus claves API para subgrafos específicos +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Restrict your API keys to specific domains and allow only certain Indexers to query with them -- Create your subgraph -- Deploy your subgraph using The Graph CLI -- Test your subgraph in the playground environment -- Integrate your subgraph in staging using the development query URL -- Publish your subgraph to The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Manage your billing ## Install The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. - You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. -3. After you sign in, your unique deploy key will be displayed on your subgraph details page. - - The deploy key allows you to publish your subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. -> Important: You need an API key to query subgraphs +> Important: You need an API key to query Subgraphs ### How to Create a Subgraph in Subgraph Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### Compatibilidad de los Subgrafos con The Graph Network -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/supported-networks/) -- No debe utilizar ninguna de las siguientes funciones: - - ipfs.cat & ipfs.map - - Errores no fatales - - Grafting +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Initialize Your Subgraph -Once your subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -You can find the `` value on your subgraph details page in Subgraph Studio, see image below: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. 
+After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## Graph Auth -Before you can deploy your subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your subgraph details page. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Then, use the following command to authenticate from the CLI: @@ -91,11 +85,11 @@ graph auth ## Deploying a Subgraph -Once you are ready, you can deploy your subgraph to Subgraph Studio. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -Use the following CLI command to deploy your subgraph: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ After running this command, the CLI will ask for a version label. ## Testing Your Subgraph -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. +After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Publish Your Subgraph -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versioning Your Subgraph with the CLI -If you want to update your subgraph, you can do the following: +If you want to update your Subgraph, you can do the following: - You can deploy a new version to Studio using the CLI (it will only be private at this point). - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). 
If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## Archivado Automático de Versiones de Subgrafos -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From 3615af1325e4d6df9dfefecfac532dbf39cac844 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:14 -0500 Subject: [PATCH 0849/1789] New translations using-subgraph-studio.mdx (Arabic) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/ar/subgraphs/developing/deploying/using-subgraph-studio.mdx index d8880ef1a196..1e0826bfe148 100644 --- a/website/src/pages/ar/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/ar/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Learn how to deploy your subgraph to Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. 
## Subgraph Studio Overview In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: -- View a list of subgraphs you've created -- Manage, view details, and visualize the status of a specific subgraph -- إنشاء وإدارة مفاتيح API الخاصة بك لـ subgraphs محددة +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Restrict your API keys to specific domains and allow only certain Indexers to query with them -- Create your subgraph -- Deploy your subgraph using The Graph CLI -- Test your subgraph in the playground environment -- Integrate your subgraph in staging using the development query URL -- Publish your subgraph to The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Manage your billing ## Install The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. - You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. -3. After you sign in, your unique deploy key will be displayed on your subgraph details page. - - The deploy key allows you to publish your subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. -> Important: You need an API key to query subgraphs +> Important: You need an API key to query Subgraphs ### How to Create a Subgraph in Subgraph Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### توافق الـ Subgraph مع شبكة The Graph -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/supported-networks/) -- يجب ألا تستخدم أيًا من الميزات التالية: - - ipfs.cat & ipfs.map - - أخطاء غير فادحة - - Grafting +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Initialize Your Subgraph -Once your subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -You can find the `` value on your subgraph details page in Subgraph Studio, see image below: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. 
+After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## Graph Auth -Before you can deploy your subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your subgraph details page. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Then, use the following command to authenticate from the CLI: @@ -91,11 +85,11 @@ graph auth ## Deploying a Subgraph -Once you are ready, you can deploy your subgraph to Subgraph Studio. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -Use the following CLI command to deploy your subgraph: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ After running this command, the CLI will ask for a version label. ## Testing Your Subgraph -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. +After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Publish Your Subgraph -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versioning Your Subgraph with the CLI -If you want to update your subgraph, you can do the following: +If you want to update your Subgraph, you can do the following: - You can deploy a new version to Studio using the CLI (it will only be private at this point). - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). 
If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## الأرشفة التلقائية لإصدارات الـ Subgraph -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From c3d6aae65adfaa8e10a6dd6b1565cbb6dde3edf2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:15 -0500 Subject: [PATCH 0850/1789] New translations using-subgraph-studio.mdx (Czech) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/cs/subgraphs/developing/deploying/using-subgraph-studio.mdx index 7c53f174237a..14be0175123c 100644 --- a/website/src/pages/cs/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/cs/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Learn how to deploy your subgraph to Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. 
## Subgraph Studio Overview In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: -- View a list of subgraphs you've created -- Manage, view details, and visualize the status of a specific subgraph -- Vytváření a správa klíčů API pro konkrétní podgrafy +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Restrict your API keys to specific domains and allow only certain Indexers to query with them -- Create your subgraph -- Deploy your subgraph using The Graph CLI -- Test your subgraph in the playground environment -- Integrate your subgraph in staging using the development query URL -- Publish your subgraph to The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Manage your billing ## Install The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. - You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. -3. After you sign in, your unique deploy key will be displayed on your subgraph details page. - - The deploy key allows you to publish your subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. -> Important: You need an API key to query subgraphs +> Important: You need an API key to query Subgraphs ### Jak vytvořit podgraf v Podgraf Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### Kompatibilita podgrafů se sítí grafů -Aby mohly být podgrafy podporovány indexátory v síti grafů, musí: - -- Index a [supported network](/supported-networks/) -- Nesmí používat žádnou z následujících funkcí: - - ipfs.cat & ipfs.map - - Nefatální - - Roubování +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Initialize Your Subgraph -Once your subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -You can find the `` value on your subgraph details page in Subgraph Studio, see image below: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. +After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. 
This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## Autorizace grafu -Before you can deploy your subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your subgraph details page. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Then, use the following command to authenticate from the CLI: @@ -91,11 +85,11 @@ graph auth ## Deploying a Subgraph -Once you are ready, you can deploy your subgraph to Subgraph Studio. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -Use the following CLI command to deploy your subgraph: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ After running this command, the CLI will ask for a version label. ## Testing Your Subgraph -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. +After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Publish Your Subgraph -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versioning Your Subgraph with the CLI -If you want to update your subgraph, you can do the following: +If you want to update your Subgraph, you can do the following: - You can deploy a new version to Studio using the CLI (it will only be private at this point). - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. 
+You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## Automatická archivace verzí podgrafů -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From c750d5eeb49a32718fa4b08f7dd2b008060e9f82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:16 -0500 Subject: [PATCH 0851/1789] New translations using-subgraph-studio.mdx (German) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/de/subgraphs/developing/deploying/using-subgraph-studio.mdx index b559bcdff049..300d81453b99 100644 --- a/website/src/pages/de/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/de/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Bereitstellung mit Subgraph Studio --- -Erfahren Sie, wie Sie Ihren Subgraphen in Subgraph Studio bereitstellen können. +Learn how to deploy your Subgraph to Subgraph Studio. -> Hinweis: Wenn Sie einen Subgraphen bereitstellen, schieben Sie ihn zu Subgraph Studio, wo Sie ihn testen können. Es ist wichtig, daran zu denken, dass Bereitstellen nicht dasselbe ist wie Veröffentlichen. Wenn Sie einen Subgraphen veröffentlichen, dann veröffentlichen Sie ihn onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. 
## Subgraph Studio Überblick In [Subgraph Studio] (https://thegraph.com/studio/) können Sie Folgendes tun: -- Eine Liste der von Ihnen erstellten Subgraphen anzeigen -- Verwalten, Details anzeigen und den Status eines bestimmten Subgraphen visualisieren -- Ihre API-Schlüssel für bestimmte Subgraphen erstellen und verwalten +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Ihre API-Schlüssel auf bestimmte Domains einschränken und nur bestimmten Indexern die Abfrage mit diesen Schlüsseln erlauben -- Ihren Subgraphen erstellen -- Ihren Subgraphen mit The Graph CLI verteilen -- Ihren Subgraphen in der „Playground“-Umgebung testen -- Ihren Subgraphen in Staging unter Verwendung der Entwicklungsabfrage-URL integrieren -- Ihren Subgraphen auf The Graph Network veröffentlichen +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Ihre Rechnungen verwalten ## Installieren der The Graph-CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Öffnen Sie [Subgraph Studio] (https://thegraph.com/studio/). 2. Verbinden Sie Ihre Wallet, um sich anzumelden. - Sie können dies über MetaMask, Coinbase Wallet, WalletConnect oder Safe tun. -3. Nachdem Sie sich angemeldet haben, wird Ihr eindeutiger Verteilungsschlüssel auf der Detailseite Ihres Subgraphen angezeigt. - - Mit dem Bereitstellungsschlüssel können Sie Ihre Subgraphen veröffentlichen oder Ihre API-Schlüssel und Abrechnungen verwalten. Er ist einmalig, kann aber neu generiert werden, wenn Sie glauben, dass er kompromittiert wurde. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. -> Wichtig: Sie benötigen einen API-Schlüssel, um Subgraphen abzufragen +> Important: You need an API key to query Subgraphs ### So erstellen Sie einen Subgraphen in Subgraph Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### Kompatibilität von Subgraphen mit dem The Graph Network -Um von Indexern auf The Graph Network unterstützt zu werden, müssen Subgraphen: - -- Ein [unterstütztes Netzwerk](/supported-networks/) indizieren -- Keine der folgenden Funktionen verwenden: - - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. 
## Initialisieren Ihres Subgraphen -Sobald Ihr Subgraph in Subgraph Studio erstellt wurde, können Sie seinen Code über die CLI mit diesem Befehl initialisieren: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -Sie finden den Wert `` auf der Detailseite Ihres Subgraphen in Subgraph Studio, siehe Abbildung unten: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -Nachdem Sie `graph init` ausgeführt haben, werden Sie aufgefordert, die Vertragsadresse, das Netzwerk und eine ABI einzugeben, die Sie abfragen möchten. Daraufhin wird ein neuer Ordner auf Ihrem lokalen Computer erstellt, der einige grundlegende Code enthält, um mit der Arbeit an Ihrem Subgraphen zu beginnen. Anschließend können Sie Ihren Subgraphen fertigstellen, um sicherzustellen, dass er wie erwartet funktioniert. +After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## Graph Auth -Bevor Sie Ihren Subgraphen in Subgraph Studio bereitstellen können, müssen Sie sich bei Ihrem Konto in der CLI anmelden. Dazu benötigen Sie Ihren Bereitstellungsschlüssel, den Sie auf der Seite mit den Details Ihres Subgraphen finden. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Verwenden Sie dann den folgenden Befehl, um sich über die CLI zu authentifizieren: @@ -91,11 +85,11 @@ graph auth ## Bereitstellen eines Subgraphen -Sobald Sie fertig sind, können Sie Ihren Subgraphen an Subgraph Studio übergeben. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Wenn Sie einen Subgraphen über die Befehlszeilenschnittstelle bereitstellen, wird er in das Studio übertragen, wo Sie ihn testen und die Metadaten aktualisieren können. Bei dieser Aktion wird Ihr Subgraph nicht im dezentralen Netzwerk veröffentlicht. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -Verwenden Sie den folgenden CLI-Befehl, um Ihren Subgraphen bereitzustellen: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ Nach der Ausführung dieses Befehls wird die CLI nach einer Versionsbezeichnung ## Testen Ihres Subgraphen -Nach der Bereitstellung können Sie Ihren Subgraphen testen (entweder in Subgraph Studio oder in Ihrer eigenen Anwendung, mit der Bereitstellungsabfrage-URL), eine weitere Version bereitstellen, die Metadaten aktualisieren und im [Graph Explorer](https://thegraph.com/explorer) veröffentlichen, wenn Sie bereit sind. +After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Verwenden Sie Subgraph Studio, um die Protokolle auf dem Dashboard zu überprüfen und nach Fehlern in Ihrem Subgraphen zu suchen. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. 
## Veröffentlichung Ihres Subgraphen -Um Ihren Subgraphen erfolgreich zu veröffentlichen, lesen Sie [Veröffentlichen eines Subgraphen](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versionierung Ihres Subgraphen mit der CLI -Wenn Sie Ihren Subgraphen aktualisieren möchten, können Sie wie folgt vorgehen: +If you want to update your Subgraph, you can do the following: - Sie können eine neue Version über die Befehlszeilenschnittstelle (CLI) in Studio bereitstellen (zu diesem Zeitpunkt ist sie nur privat). - Wenn Sie damit zufrieden sind, können Sie Ihre neue Bereitstellung im [Graph Explorer] (https://thegraph.com/explorer) veröffentlichen. -- Mit dieser Aktion wird eine neue Version Ihres Subgraphen erstellt, die von Kuratoren mit Signalen versehen und von Indexern indiziert werden kann. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -Sie können auch die Metadaten Ihres Subgraphen aktualisieren, ohne eine neue Version zu veröffentlichen. Sie können Ihre Subgraph-Details in Studio (unter dem Profilbild, dem Namen, der Beschreibung usw.) aktualisieren, indem Sie eine Option namens **Details aktualisieren** im [Graph Explorer] (https://thegraph.com/explorer) aktivieren. Wenn diese Option aktiviert ist, wird eine Onchain-Transaktion generiert, die die Subgraph-Details im Explorer aktualisiert, ohne dass eine neue Version mit einer neuen Bereitstellung veröffentlicht werden muss. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Hinweis: Die Veröffentlichung einer neuen Version eines Subgraphen im Netz ist mit Kosten verbunden. Zusätzlich zu den Transaktionsgebühren müssen Sie auch einen Teil der Kurationssteuer für das Auto-Migrations-Signal finanzieren. Sie können keine neue Version Ihres Subgraphen veröffentlichen, wenn Kuratoren nicht darauf signalisiert haben. Für weitere Informationen, lesen Sie bitte [hier](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## Automatische Archivierung von Subgraph-Versionen -Immer wenn Sie eine neue Subgraph-Version in Subgraph Studio bereitstellen, wird die vorherige Version archiviert. Archivierte Versionen werden nicht indiziert/synchronisiert und können daher nicht abgefragt werden. Sie können die Archivierung einer archivierten Version Ihres Subgraphen in Subgraph Studio dearchivieren. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your Subgraph in Subgraph Studio. 
-> Hinweis: Frühere Versionen von nicht veröffentlichten Subgraphen, die in Studio bereitgestellt wurden, werden automatisch archiviert. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From 6dec7da9c9459d226225203d22df59cf9a6ee02d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:17 -0500 Subject: [PATCH 0852/1789] New translations using-subgraph-studio.mdx (Italian) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/it/subgraphs/developing/deploying/using-subgraph-studio.mdx index 6d7e019d9d6f..3a07d7d50b24 100644 --- a/website/src/pages/it/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/it/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Learn how to deploy your subgraph to Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. ## Subgraph Studio Overview In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: -- View a list of subgraphs you've created -- Manage, view details, and visualize the status of a specific subgraph -- Creare e gestire le chiavi API per specifici subgraph +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Restrict your API keys to specific domains and allow only certain Indexers to query with them -- Create your subgraph -- Deploy your subgraph using The Graph CLI -- Test your subgraph in the playground environment -- Integrate your subgraph in staging using the development query URL -- Publish your subgraph to The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Manage your billing ## Install The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. - You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. -3. After you sign in, your unique deploy key will be displayed on your subgraph details page. - - The deploy key allows you to publish your subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. 
-> Important: You need an API key to query subgraphs +> Important: You need an API key to query Subgraphs ### Come creare un subgraph nel Subgraph Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### Compatibilità del subgraph con The Graph Network -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/supported-networks/) -- Non deve utilizzare nessuna delle seguenti funzioni: - - ipfs.cat & ipfs.map - - Errori non fatali - - Grafting +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Initialize Your Subgraph -Once your subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -You can find the `` value on your subgraph details page in Subgraph Studio, see image below: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. +After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## Graph Auth -Before you can deploy your subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your subgraph details page. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Then, use the following command to authenticate from the CLI: @@ -91,11 +85,11 @@ graph auth ## Deploying a Subgraph -Once you are ready, you can deploy your subgraph to Subgraph Studio. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -Use the following CLI command to deploy your subgraph: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ After running this command, the CLI will ask for a version label. ## Testing Your Subgraph -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. 
+After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Publish Your Subgraph -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versioning Your Subgraph with the CLI -If you want to update your subgraph, you can do the following: +If you want to update your Subgraph, you can do the following: - You can deploy a new version to Studio using the CLI (it will only be private at this point). - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## Archiviazione automatica delle versioni del subgraph -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. 
You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From c38da8090a226f2a92254c467bf3a8ce743a25dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:18 -0500 Subject: [PATCH 0853/1789] New translations using-subgraph-studio.mdx (Japanese) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/ja/subgraphs/developing/deploying/using-subgraph-studio.mdx index 21bb85d4fb51..4e8503e208e4 100644 --- a/website/src/pages/ja/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/ja/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Learn how to deploy your subgraph to Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. ## Subgraph Studio Overview In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: -- View a list of subgraphs you've created -- Manage, view details, and visualize the status of a specific subgraph -- 特定のサブグラフ用の API キーの作成と管理 +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Restrict your API keys to specific domains and allow only certain Indexers to query with them -- Create your subgraph -- Deploy your subgraph using The Graph CLI -- Test your subgraph in the playground environment -- Integrate your subgraph in staging using the development query URL -- Publish your subgraph to The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Manage your billing ## Install The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. - You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. -3. After you sign in, your unique deploy key will be displayed on your subgraph details page. - - The deploy key allows you to publish your subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. 
-> Important: You need an API key to query subgraphs +> Important: You need an API key to query Subgraphs ### Subgraph Studio でサブグラフを作成する方法 @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### Subgraph と The Graph Network の互換性 -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/supported-networks/) -- 以下の機能のいずれも使用してはいけません: - - ipfs.cat & ipfs.map - - 致命的でないエラー - - Grafting +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Initialize Your Subgraph -Once your subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -You can find the `` value on your subgraph details page in Subgraph Studio, see image below: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. +After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## グラフ認証 -Before you can deploy your subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your subgraph details page. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Then, use the following command to authenticate from the CLI: @@ -91,11 +85,11 @@ graph auth ## Deploying a Subgraph -Once you are ready, you can deploy your subgraph to Subgraph Studio. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -Use the following CLI command to deploy your subgraph: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ After running this command, the CLI will ask for a version label. ## Testing Your Subgraph -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. 
+After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Publish Your Subgraph -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versioning Your Subgraph with the CLI -If you want to update your subgraph, you can do the following: +If you want to update your Subgraph, you can do the following: - You can deploy a new version to Studio using the CLI (it will only be private at this point). - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## サブグラフのバージョンの自動アーカイブ -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your Subgraph in Subgraph Studio. 
-> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From f6cf602c2e136336f43210850332b00a20ad5492 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:19 -0500 Subject: [PATCH 0854/1789] New translations using-subgraph-studio.mdx (Korean) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/ko/subgraphs/developing/deploying/using-subgraph-studio.mdx index 634c2700ba68..77d10212c770 100644 --- a/website/src/pages/ko/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/ko/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Learn how to deploy your subgraph to Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. ## Subgraph Studio Overview In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: -- View a list of subgraphs you've created -- Manage, view details, and visualize the status of a specific subgraph -- Create and manage your API keys for specific subgraphs +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Restrict your API keys to specific domains and allow only certain Indexers to query with them -- Create your subgraph -- Deploy your subgraph using The Graph CLI -- Test your subgraph in the playground environment -- Integrate your subgraph in staging using the development query URL -- Publish your subgraph to The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Manage your billing ## Install The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. - You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. -3. After you sign in, your unique deploy key will be displayed on your subgraph details page. - - The deploy key allows you to publish your subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. 
-> Important: You need an API key to query subgraphs +> Important: You need an API key to query Subgraphs ### How to Create a Subgraph in Subgraph Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### Subgraph Compatibility with The Graph Network -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/supported-networks/) -- Must not use any of the following features: - - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Initialize Your Subgraph -Once your subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -You can find the `` value on your subgraph details page in Subgraph Studio, see image below: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. +After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## Graph Auth -Before you can deploy your subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your subgraph details page. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Then, use the following command to authenticate from the CLI: @@ -91,11 +85,11 @@ graph auth ## Deploying a Subgraph -Once you are ready, you can deploy your subgraph to Subgraph Studio. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -Use the following CLI command to deploy your subgraph: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ After running this command, the CLI will ask for a version label. ## Testing Your Subgraph -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. 
+After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Publish Your Subgraph -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versioning Your Subgraph with the CLI -If you want to update your subgraph, you can do the following: +If you want to update your Subgraph, you can do the following: - You can deploy a new version to Studio using the CLI (it will only be private at this point). - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## Automatic Archiving of Subgraph Versions -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. 
You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From 4f44b77a6d403c22a5ce5f886eb3a7814b876e35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:20 -0500 Subject: [PATCH 0855/1789] New translations using-subgraph-studio.mdx (Dutch) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/nl/subgraphs/developing/deploying/using-subgraph-studio.mdx index 04fca3fb140a..370e428284cc 100644 --- a/website/src/pages/nl/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/nl/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Learn how to deploy your subgraph to Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. ## Subgraph Studio Overview In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: -- View a list of subgraphs you've created -- Manage, view details, and visualize the status of a specific subgraph -- Create and manage your API keys for specific subgraphs +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Restrict your API keys to specific domains and allow only certain Indexers to query with them -- Create your subgraph -- Deploy your subgraph using The Graph CLI -- Test your subgraph in the playground environment -- Integrate your subgraph in staging using the development query URL -- Publish your subgraph to The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Manage your billing ## Install The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. - You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. -3. After you sign in, your unique deploy key will be displayed on your subgraph details page. - - The deploy key allows you to publish your subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. 
-> Important: You need an API key to query subgraphs +> Important: You need an API key to query Subgraphs ### How to Create a Subgraph in Subgraph Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### Subgraph Compatibility with The Graph Network -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/supported-networks/) -- Must not use any of the following features: - - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Initialize Your Subgraph -Once your subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -You can find the `` value on your subgraph details page in Subgraph Studio, see image below: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. +After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## Graph Auth -Before you can deploy your subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your subgraph details page. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Then, use the following command to authenticate from the CLI: @@ -91,11 +85,11 @@ graph auth ## Deploying a Subgraph -Once you are ready, you can deploy your subgraph to Subgraph Studio. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -Use the following CLI command to deploy your subgraph: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ After running this command, the CLI will ask for a version label. ## Testing Your Subgraph -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. 
+After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Publish Your Subgraph -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versioning Your Subgraph with the CLI -If you want to update your subgraph, you can do the following: +If you want to update your Subgraph, you can do the following: - You can deploy a new version to Studio using the CLI (it will only be private at this point). - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## Automatic Archiving of Subgraph Versions -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. 
You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From 98b0529f1cd17503d5534c749005335f6b3b0d81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:21 -0500 Subject: [PATCH 0856/1789] New translations using-subgraph-studio.mdx (Polish) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/pl/subgraphs/developing/deploying/using-subgraph-studio.mdx index d2023c7b4a09..c21ff6dc2358 100644 --- a/website/src/pages/pl/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/pl/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Learn how to deploy your subgraph to Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. ## Subgraph Studio Overview In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: -- View a list of subgraphs you've created -- Manage, view details, and visualize the status of a specific subgraph -- Create and manage your API keys for specific subgraphs +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Restrict your API keys to specific domains and allow only certain Indexers to query with them -- Create your subgraph -- Deploy your subgraph using The Graph CLI -- Test your subgraph in the playground environment -- Integrate your subgraph in staging using the development query URL -- Publish your subgraph to The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Manage your billing ## Install The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. - You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. -3. After you sign in, your unique deploy key will be displayed on your subgraph details page. - - The deploy key allows you to publish your subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. 
-> Important: You need an API key to query subgraphs +> Important: You need an API key to query Subgraphs ### How to Create a Subgraph in Subgraph Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### Subgraph Compatibility with The Graph Network -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/supported-networks/) -- Must not use any of the following features: - - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Initialize Your Subgraph -Once your subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -You can find the `` value on your subgraph details page in Subgraph Studio, see image below: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. +After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## Graph Auth -Before you can deploy your subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your subgraph details page. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Then, use the following command to authenticate from the CLI: @@ -91,11 +85,11 @@ graph auth ## Deploying a Subgraph -Once you are ready, you can deploy your subgraph to Subgraph Studio. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -Use the following CLI command to deploy your subgraph: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ After running this command, the CLI will ask for a version label. ## Testing Your Subgraph -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. 
+After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Publish Your Subgraph -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versioning Your Subgraph with the CLI -If you want to update your subgraph, you can do the following: +If you want to update your Subgraph, you can do the following: - You can deploy a new version to Studio using the CLI (it will only be private at this point). - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## Automatic Archiving of Subgraph Versions -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. 
You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From 6b7cd8ed76a1ec0e2f6bfc9726919cc4e26a88c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:22 -0500 Subject: [PATCH 0857/1789] New translations using-subgraph-studio.mdx (Portuguese) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/pt/subgraphs/developing/deploying/using-subgraph-studio.mdx index d9e9be3f83e9..4c5602700f8a 100644 --- a/website/src/pages/pt/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/pt/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Learn how to deploy your subgraph to Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. ## Subgraph Studio Overview In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: -- View a list of subgraphs you've created -- Manage, view details, and visualize the status of a specific subgraph -- Criar e gerir as suas chaves de API para subgraphs específicos +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Restrict your API keys to specific domains and allow only certain Indexers to query with them -- Create your subgraph -- Deploy your subgraph using The Graph CLI -- Test your subgraph in the playground environment -- Integrate your subgraph in staging using the development query URL -- Publish your subgraph to The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Manage your billing ## Install The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. - You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. -3. After you sign in, your unique deploy key will be displayed on your subgraph details page. - - The deploy key allows you to publish your subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. 
-> Important: You need an API key to query subgraphs +> Important: You need an API key to query Subgraphs ### Como Criar um Subgraph no Subgraph Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### Compatibilidade de Subgraph com a Graph Network -Para ter apoio de Indexadores na Graph Network, os subgraphs devem: - -- Index a [supported network](/supported-networks/) -- Não deve usar quaisquer das seguintes características: - - ipfs.cat & ipfs.map - - Erros não-fatais - - Enxerto +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Initialize Your Subgraph -Once your subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -You can find the `` value on your subgraph details page in Subgraph Studio, see image below: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. +After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## Autenticação -Before you can deploy your subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your subgraph details page. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Then, use the following command to authenticate from the CLI: @@ -91,11 +85,11 @@ graph auth ## Deploying a Subgraph -Once you are ready, you can deploy your subgraph to Subgraph Studio. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -Use the following CLI command to deploy your subgraph: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ After running this command, the CLI will ask for a version label. ## Testing Your Subgraph -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. 
+After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Publish Your Subgraph -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versioning Your Subgraph with the CLI -If you want to update your subgraph, you can do the following: +If you want to update your Subgraph, you can do the following: - You can deploy a new version to Studio using the CLI (it will only be private at this point). - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## Arquivamento Automático de Versões de Subgraphs -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. 
You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From 72484c6fdf276d879bfc727bc935f820fa90cd2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:23 -0500 Subject: [PATCH 0858/1789] New translations using-subgraph-studio.mdx (Russian) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/ru/subgraphs/developing/deploying/using-subgraph-studio.mdx index e1aadd279a0b..3ff9c8594763 100644 --- a/website/src/pages/ru/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/ru/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Узнайте, как развернуть свой субграф в Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. ## Обзор Subgraph Studio В [Subgraph Studio](https://thegraph.com/studio/) Вы можете выполнять следующие действия: -- Просматривать список созданных Вами субграфов -- Управлять, просматривать детали и визуализировать статус конкретного субграфа -- Создание и управление ключами API для определенных подграфов +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Ограничивать использование своих API-ключей определенными доменами и разрешать только определенным индексаторам выполнять запросы с их помощью -- Создавать свой субграф -- Развертывать свой субграф, используя The Graph CLI -- Тестировать свой субграф в тестовой среде Playground -- Интегрировать свой субграф на стадии разработки, используя URL запроса разработки -- Публиковать свой субграф в The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Управлять своими платежами ## Установка The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Откройте [Subgraph Studio](https://thegraph.com/studio/). 2. Подключите свой кошелек для входа. - Вы можете это сделать через MetaMask, Coinbase Wallet, WalletConnect или Safe. -3. После входа в систему Ваш уникальный ключ развертывания будет отображаться на странице сведений о Вашем субграфе. - - Ключ развертывания позволяет публиковать субграфы, а также управлять вашими API-ключами и оплатой. Он уникален, но может быть восстановлен, если Вы подозреваете, что он был взломан. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. 
+ - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. -> Важно: для выполнения запросов к субграфам необходим API-ключ +> Important: You need an API key to query Subgraphs ### How to Create a Subgraph in Subgraph Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### Совместимость подграфов с сетью Graph -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/supported-networks/) -- Не должны использовать ни одну из следующих функций: - - ipfs.cat & ipfs.map - - Нефатальные ошибки - - Grafting +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Инициализация Вашего Субграфа -После создания субграфа в Subgraph Studio Вы можете инициализировать его код через CLI с помощью следующей команды: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -Значение `` можно найти на странице сведений о субграфе в Subgraph Studio, см. изображение ниже: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -После запуска `graph init` Вам будет предложено ввести адрес контракта, сеть и ABI, которые Вы хотите запросить. Это приведет к созданию новой папки на Вашем локальном компьютере с базовым кодом для начала работы над субграфом. Затем Вы можете завершить работу над своим субграфом, чтобы убедиться, что он функционирует должным образом. +After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## Аутентификация в Graph -Прежде чем Вы сможете развернуть свой субграф в Subgraph Studio, Вам будет необходимо войти в свою учетную запись в CLI. Для этого Вам понадобится ключ развертывания, который Вы сможете найти на странице сведений о субграфе. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. После этого используйте следующую команду для аутентификации через CLI: @@ -91,11 +85,11 @@ graph auth ## Развертывание субграфа -Когда будете готовы, Вы сможете развернуть свой субграф в Subgraph Studio. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Развертывание субграфа с помощью CLI отправляет его в Studio, где Вы сможете протестировать его и обновить метаданные. Это действие не приводит к публикации субграфа в децентрализованной сети. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. 
-Используйте следующую команду CLI для развертывания своего субграфа: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ graph deploy ## Тестирование Вашего субграфа -После развертывания Вы можете протестировать свой субграф (в Subgraph Studio или в собственном приложении, используя URL-адрес запроса на развертывание), развернуть другую версию, обновить метаданные и, когда будете готовы, опубликовать в [Graph Explorer](https://thegraph.com/explorer). +After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Используйте Subgraph Studio, чтобы проверить логи на панели управления и обнаружить возможные ошибки в своем субграфе. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Публикация Вашего субграфа -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Управление версиями Вашего субграфа с помощью CLI -Если Вы хотите обновить свой субграф, Вы можете сделать следующее: +If you want to update your Subgraph, you can do the following: - Вы можете развернуть новую версию в Studio, используя CLI (на этом этапе она будет только приватной). - Если результат Вас устроит, Вы можете опубликовать новое развертывание в [Graph Explorer](https://thegraph.com/explorer). -- Это действие создаст новую версию вашего субграфа, о которой Кураторы смогут начать сигнализировать, а Индексаторы — индексировать. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. 
For more information, please read more [here](/resources/roles/curating/). ## Автоматическое архивирование версий подграфа -Каждый раз, когда Вы развертываете новую версию субграфа в Subgraph Studio, предыдущая версия архивируется. Архивированные версии не будут проиндексированы/синхронизированы и, следовательно, их нельзя будет запросить. Вы можете разархивировать архивированную версию своего субграфа в Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Примечание: предыдущие версии непубликованных субграфов, развернутых в Studio, будут автоматически архивированы. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From 0c1973ea0f36310f40404131edbbb37c102f8669 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:24 -0500 Subject: [PATCH 0859/1789] New translations using-subgraph-studio.mdx (Swedish) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/sv/subgraphs/developing/deploying/using-subgraph-studio.mdx index cf6d67e5bb9d..dc1facd6d5cb 100644 --- a/website/src/pages/sv/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/sv/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Learn how to deploy your subgraph to Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. ## Subgraph Studio Overview In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: -- View a list of subgraphs you've created -- Manage, view details, and visualize the status of a specific subgraph -- Skapa och hantera API nycklar för specifika undergrafer +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Restrict your API keys to specific domains and allow only certain Indexers to query with them -- Create your subgraph -- Deploy your subgraph using The Graph CLI -- Test your subgraph in the playground environment -- Integrate your subgraph in staging using the development query URL -- Publish your subgraph to The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Manage your billing ## Install The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. 
- You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. -3. After you sign in, your unique deploy key will be displayed on your subgraph details page. - - The deploy key allows you to publish your subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. -> Important: You need an API key to query subgraphs +> Important: You need an API key to query Subgraphs ### Hur man skapar en subgraf i Subgraf Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### Kompatibilitet mellan undergrafer och grafnätet -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/supported-networks/) -- Får inte använda någon av följande egenskaper: - - ipfs.cat & ipfs.map - - Icke dödliga fel - - Ympning +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Initialize Your Subgraph -Once your subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -You can find the `` value on your subgraph details page in Subgraph Studio, see image below: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. +After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## Auth för grafer -Before you can deploy your subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your subgraph details page. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Then, use the following command to authenticate from the CLI: @@ -91,11 +85,11 @@ graph auth ## Deploying a Subgraph -Once you are ready, you can deploy your subgraph to Subgraph Studio. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. 
This action won't publish your Subgraph to the decentralized network. -Use the following CLI command to deploy your subgraph: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ After running this command, the CLI will ask for a version label. ## Testing Your Subgraph -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. +After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Publish Your Subgraph -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versioning Your Subgraph with the CLI -If you want to update your subgraph, you can do the following: +If you want to update your Subgraph, you can do the following: - You can deploy a new version to Studio using the CLI (it will only be private at this point). - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. 
For more information, please read more [here](/resources/roles/curating/). ## Automatisk arkivering av versioner av undergrafer -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From 0eb9eb4c08d0e8f80ef6c0ece5b2920ff34f6fcd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:25 -0500 Subject: [PATCH 0860/1789] New translations using-subgraph-studio.mdx (Turkish) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/tr/subgraphs/developing/deploying/using-subgraph-studio.mdx index d7aaee820f01..a4e8ca41d951 100644 --- a/website/src/pages/tr/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/tr/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Subgraph'inizi Subgraph Studio'da dağıtma adımlarını öğrenin. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. ## Subgraph Studio'ya Genel Bakış [Subgraph Studio](https://thegraph.com/studio/)'da aşağıdakileri yapabilirsiniz: -- Oluşturmuş olduğunuz sugraph'lerin listesini görüntülemek -- Belirli bir subgraph'i yönetmek, subgraph'in detaylarını görmek ve durumunu görüntülemek -- Belirli subgraph'ler için API anahtarlarınızı oluşturmak ve yönetmek +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - API anahtarlarınızı belirli alanlara sınırlamak ve yalnızca belirli Endeksleyicilerin bu anahtarlarla sorgulama yapmasına izin vermek -- Subgraph'inizi oluşturmak -- Subgraph'inizi The Graph CLI'yi kullanarak dağıtmak -- Subgraph'inizi playground ortamında test etmek -- Geliştirme sorgu URL'sini kullanarak subgraph’inizi hazırlama ortamına entegre etmek -- Subgraph'inizi The Graph Ağında yayımlamak +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Faturalarınızı yönetmek ## The Graph CLI'yi Yükleme @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. 
[Subgraph Studio](https://thegraph.com/studio/)'yu açın. 2. Giriş yapmak için cüzdanınızı bağlayın. - Cüzdan bağlamak için MetaMask, Conbase Wallet, WalletConnect ya da Safe kullanabilirsiniz. -3. Giriş yaptıktan sonra, benzersiz yayına alma anahtarınız subgraph ayrıntıları sayfasında görünecektir. - - Dağıtma anahtarınız subgraph'lerinizi yayımlamanızı veya API anahtarlarınızı ve faturanızı yönetmenizi sağlar. Dağıtma anahtarınız benzersizdir; ancak anahtarınızın ele geçirildiğini düşünüyorsanız bu anahtarı yeniden yaratabilirsiniz. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. -> Önemli not: Subgraph'leri sorgulamak için bir API anahtarına sahip olmanız gerekmektedir +> Important: You need an API key to query Subgraphs ### Subgraph Stüdyo'da Subgraph Nasıl Oluşturulur @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### The Graph Ağı ile Subgraph Uyumluluğu -Subgraph'lerin Graph Ağı Endeksleyicileri tarafından desteklenebilmesi için şu gereklilikleri karşılaması gerekir: - -- Index a [supported network](/supported-networks/) -- Aşağıdaki özelliklerden hiçbirini kullanmamalı: - - ipfs.cat & ipfs.map - - Ölümcül Olmayan Hatalar - - Aşılama +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Subgraph'inizi İlklendirme -Subgraph’iniz Subgraph Studio’da oluşturulduktan sonra, aşağıdaki komutla CLI üzerinden subgraph kodunu ilklendirebilirsiniz: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -`` değerini Subgraph Studio’daki subgraph ayrıntı sayfanızda bulabilirsiniz; aşağıdaki resme bakın: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -`graph init` komutunu çalıştırdıktan sonra sorgulamak istediğiniz kontrat adresini, ağı ve ABI’yi girmeniz istenecektir. Bu komut, yerel makinenizde subgraph’inizle çalışmaya başlamanız için bazı temel kodları içeren yeni bir klasör oluşturacaktır. Sonrasında subgraph'inizi işlevselliğini test ederek nihayetlendirebilirsiniz. +After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## Graph Auth -Subgraph’inizi Subgraph Studio’da yayına alabilmek için önce CLI üzerinden hesabınıza giriş yapmanız gerekmektedir. Bunun için, subgraph ayrıntıları sayfanızda bulabileceğiniz yayına alma anahtarınıza ihtiyacınız olacak. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. CLI üzerinden kimlik doğrulaması yapmak için aşağıdaki komutu kullanın: @@ -91,11 +85,11 @@ graph auth ## Bir Subgraph’i Dağıtma -Hazır olduğunuzda subgraph’inizi Subgraph Studio’da dağıtabilirsiniz. 
+Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> CLI ile bir subgraph dağıtmak, onu Studio’ya iletir; burada subgraph'i test edip meta verilerini güncelleyebilirsiniz. Bu işlem, subgraph’inizi merkeziyetsiz ağda yayımlamaz. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -Subgraph’inizi dağıtmak için aşağıdaki CLI komutunu kullanın: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ Bu komutu çalıştırdıktan sonra CLI sizden bir sürüm etiketi isteyecektir. ## Subgraph’inizi Test Etme -Yayına aldıktan sonra, subgraph’inizi (Subgraph Studio’da veya sorgu URL’si ile kendi uygulamanızda) test edebilir, yeni bir sürüm yayına alabilir, meta verileri güncelleyebilir ve hazır olduğunuzda [Graph Gezgini](https://thegraph.com/explorer)'nde yayımlayabilirsiniz. +After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Subgraph Studio’da günlükleri kontrol ederek subgraph’inizle ilgili hataları görebilirsiniz. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Subgraph’inizi Yayımlama -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## CLI ile Subgraph’inizi Sürümleme -Subgraph’inizi güncellemek isterseniz, aşağıdaki adımları izleyebilirsiniz: +If you want to update your Subgraph, you can do the following: - CLI kullanarak Studio’da yeni bir sürüm dağıtabilirsiniz (bu sürüm yalnızca özel olarak kalacaktır). - Memnun kaldığınızda, yeni dağıtımınızı [Graph Gezgini](https://thegraph.com/explorer)'nde yayımlayabilirsiniz. -- Bu işlem, küratörlerin sinyal vermeye başlayabileceği ve Endeksleyicilerin endeksleyebileceği, subgraph'inizin yeni bir sürümünü oluşturur. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. 
You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## Subgraph Sürümlerinin Otomatik Arşivlenmesi -Subgraph Studio’da yeni bir subgraph sürümü yayına aldığınızda, önceki sürüm arşivlenecektir. Arşivlenen sürümler endekslenmez/senkronize edilmez ve bu nedenle sorgulanamaz. Subgraph’inizin arşivlenen bir sürümünü Subgraph Studio'da arşivden çıkarabilirsiniz. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Not: Studio’da yayına alınan ancak yayımlanmamış subgraph'lerin önceki sürümlerinin otomatik olarak arşivlenecektir. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Arşivden Çıkarma](/img/Unarchive.png) From 70769f778c93c95e9d5447e75159a707bb2d1b4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:26 -0500 Subject: [PATCH 0861/1789] New translations using-subgraph-studio.mdx (Ukrainian) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/uk/subgraphs/developing/deploying/using-subgraph-studio.mdx index db3f790fdfe6..2b4c1c11efa0 100644 --- a/website/src/pages/uk/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/uk/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Learn how to deploy your subgraph to Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. 
## Subgraph Studio Overview In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: -- View a list of subgraphs you've created -- Manage, view details, and visualize the status of a specific subgraph -- Create and manage your API keys for specific subgraphs +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Restrict your API keys to specific domains and allow only certain Indexers to query with them -- Create your subgraph -- Deploy your subgraph using The Graph CLI -- Test your subgraph in the playground environment -- Integrate your subgraph in staging using the development query URL -- Publish your subgraph to The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Manage your billing ## Install The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. - You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. -3. After you sign in, your unique deploy key will be displayed on your subgraph details page. - - The deploy key allows you to publish your subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. -> Important: You need an API key to query subgraphs +> Important: You need an API key to query Subgraphs ### How to Create a Subgraph in Subgraph Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### Subgraph Compatibility with The Graph Network -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/supported-networks/) -- Must not use any of the following features: - - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Initialize Your Subgraph -Once your subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -You can find the `` value on your subgraph details page in Subgraph Studio, see image below: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. 
+After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## Graph Auth -Before you can deploy your subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your subgraph details page. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Then, use the following command to authenticate from the CLI: @@ -91,11 +85,11 @@ graph auth ## Deploying a Subgraph -Once you are ready, you can deploy your subgraph to Subgraph Studio. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -Use the following CLI command to deploy your subgraph: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ After running this command, the CLI will ask for a version label. ## Testing Your Subgraph -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. +After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Publish Your Subgraph -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versioning Your Subgraph with the CLI -If you want to update your subgraph, you can do the following: +If you want to update your Subgraph, you can do the following: - You can deploy a new version to Studio using the CLI (it will only be private at this point). - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). 
If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## Automatic Archiving of Subgraph Versions -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From 59534e3c9e6288f0da7441cf19acbf719ab19c7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:28 -0500 Subject: [PATCH 0862/1789] New translations using-subgraph-studio.mdx (Chinese Simplified) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/zh/subgraphs/developing/deploying/using-subgraph-studio.mdx index 0e20b7f2a2a0..f2edb6992fe0 100644 --- a/website/src/pages/zh/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/zh/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Learn how to deploy your subgraph to Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. 
## Subgraph Studio Overview In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: -- View a list of subgraphs you've created -- Manage, view details, and visualize the status of a specific subgraph -- 为特定子图创建和管理 API 密钥 +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Restrict your API keys to specific domains and allow only certain Indexers to query with them -- Create your subgraph -- Deploy your subgraph using The Graph CLI -- Test your subgraph in the playground environment -- Integrate your subgraph in staging using the development query URL -- Publish your subgraph to The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Manage your billing ## Install The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. - You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. -3. After you sign in, your unique deploy key will be displayed on your subgraph details page. - - The deploy key allows you to publish your subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. -> Important: You need an API key to query subgraphs +> Important: You need an API key to query Subgraphs ### 如何在子图工作室中创建子图 @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### 子图与图形网络的兼容性 -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/supported-networks/) -- 不得使用以下任何功能: - - ipfs.cat & ipfs.map - - 非致命错误 - - 嫁接 +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Initialize Your Subgraph -Once your subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -You can find the `` value on your subgraph details page in Subgraph Studio, see image below: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. +After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. 
You can then finalize your Subgraph to make sure it works as expected. ## Graph 认证 -Before you can deploy your subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your subgraph details page. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Then, use the following command to authenticate from the CLI: @@ -91,11 +85,11 @@ graph auth ## Deploying a Subgraph -Once you are ready, you can deploy your subgraph to Subgraph Studio. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -Use the following CLI command to deploy your subgraph: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ After running this command, the CLI will ask for a version label. ## Testing Your Subgraph -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. +After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Publish Your Subgraph -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versioning Your Subgraph with the CLI -If you want to update your subgraph, you can do the following: +If you want to update your Subgraph, you can do the following: - You can deploy a new version to Studio using the CLI (it will only be private at this point). - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. 
You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## 子图版本的自动归档 -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From 33cbc52c5783a43dcc4a1c03a273b72d8276dabc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:29 -0500 Subject: [PATCH 0863/1789] New translations using-subgraph-studio.mdx (Urdu (Pakistan)) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/ur/subgraphs/developing/deploying/using-subgraph-studio.mdx index 2d16e87e3f7a..f41499fd9c51 100644 --- a/website/src/pages/ur/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/ur/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Learn how to deploy your subgraph to Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. 
## Subgraph Studio Overview In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: -- View a list of subgraphs you've created -- Manage, view details, and visualize the status of a specific subgraph -- مخصوص سب گرافس کے لۓ API کیز بنائیں اور ان کا انتظام کریں +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Restrict your API keys to specific domains and allow only certain Indexers to query with them -- Create your subgraph -- Deploy your subgraph using The Graph CLI -- Test your subgraph in the playground environment -- Integrate your subgraph in staging using the development query URL -- Publish your subgraph to The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Manage your billing ## Install The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. - You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. -3. After you sign in, your unique deploy key will be displayed on your subgraph details page. - - The deploy key allows you to publish your subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. -> Important: You need an API key to query subgraphs +> Important: You need an API key to query Subgraphs ### How to Create a Subgraph in Subgraph Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### گراف نیٹ ورک کے ساتھ سب گراف مطابقت -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/supported-networks/) -- درج ذیل خصوصیات میں سے کوئی بھی استعمال نہیں کرنا چاہیے: - - ipfs.cat & ipfs.map - - Non-fatal errors - - گرافٹنگ +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Initialize Your Subgraph -Once your subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -You can find the `` value on your subgraph details page in Subgraph Studio, see image below: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. 
+After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## Graph Auth -Before you can deploy your subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your subgraph details page. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Then, use the following command to authenticate from the CLI: @@ -91,11 +85,11 @@ graph auth ## Deploying a Subgraph -Once you are ready, you can deploy your subgraph to Subgraph Studio. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -Use the following CLI command to deploy your subgraph: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ After running this command, the CLI will ask for a version label. ## Testing Your Subgraph -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. +After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Publish Your Subgraph -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versioning Your Subgraph with the CLI -If you want to update your subgraph, you can do the following: +If you want to update your Subgraph, you can do the following: - You can deploy a new version to Studio using the CLI (it will only be private at this point). - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). 
If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## سب گراف ورژن کی خودکار آرکائیونگ -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From d365c3d357416c73210e2bfa5ee74bf2a1e38274 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:30 -0500 Subject: [PATCH 0864/1789] New translations using-subgraph-studio.mdx (Vietnamese) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/vi/subgraphs/developing/deploying/using-subgraph-studio.mdx index 98602d583746..8e89b2999d96 100644 --- a/website/src/pages/vi/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/vi/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Learn how to deploy your subgraph to Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. 
## Subgraph Studio Overview In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: -- View a list of subgraphs you've created -- Manage, view details, and visualize the status of a specific subgraph -- Create and manage your API keys for specific subgraphs +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Restrict your API keys to specific domains and allow only certain Indexers to query with them -- Create your subgraph -- Deploy your subgraph using The Graph CLI -- Test your subgraph in the playground environment -- Integrate your subgraph in staging using the development query URL -- Publish your subgraph to The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Manage your billing ## Install The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. - You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. -3. After you sign in, your unique deploy key will be displayed on your subgraph details page. - - The deploy key allows you to publish your subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. -> Important: You need an API key to query subgraphs +> Important: You need an API key to query Subgraphs ### How to Create a Subgraph in Subgraph Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### Subgraph Compatibility with The Graph Network -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/supported-networks/) -- Must not use any of the following features: - - ipfs.cat & ipfs.map - - Lỗi không nghiêm trọng - - Ghép +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Initialize Your Subgraph -Once your subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -You can find the `` value on your subgraph details page in Subgraph Studio, see image below: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. 
+After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## Graph Auth -Before you can deploy your subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your subgraph details page. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Then, use the following command to authenticate from the CLI: @@ -91,11 +85,11 @@ graph auth ## Deploying a Subgraph -Once you are ready, you can deploy your subgraph to Subgraph Studio. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -Use the following CLI command to deploy your subgraph: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ After running this command, the CLI will ask for a version label. ## Testing Your Subgraph -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. +After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Publish Your Subgraph -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versioning Your Subgraph with the CLI -If you want to update your subgraph, you can do the following: +If you want to update your Subgraph, you can do the following: - You can deploy a new version to Studio using the CLI (it will only be private at this point). - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). 
If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## Automatic Archiving of Subgraph Versions -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From a81544955b914f29b3fe722fbd210e6e82270761 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:31 -0500 Subject: [PATCH 0865/1789] New translations using-subgraph-studio.mdx (Marathi) --- .../deploying/using-subgraph-studio.mdx | 66 +++++++++---------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/mr/subgraphs/developing/deploying/using-subgraph-studio.mdx index 4769cbc3408b..2319974d45ed 100644 --- a/website/src/pages/mr/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/mr/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,23 +2,23 @@ title: Deploying Using Subgraph Studio --- -Learn how to deploy your subgraph to Subgraph Studio. +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. 
## Subgraph Studio Overview In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: -- View a list of subgraphs you've created -- Manage, view details, and visualize the status of a specific subgraph -- विशिष्ट सबग्राफसाठी तुमच्या API की तयार करा आणि व्यवस्थापित करा +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - Restrict your API keys to specific domains and allow only certain Indexers to query with them -- Create your subgraph -- Deploy your subgraph using The Graph CLI -- Test your subgraph in the playground environment -- Integrate your subgraph in staging using the development query URL -- Publish your subgraph to The Graph Network +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - Manage your billing ## Install The Graph CLI @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. - You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. -3. After you sign in, your unique deploy key will be displayed on your subgraph details page. - - The deploy key allows you to publish your subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. -> Important: You need an API key to query subgraphs +> Important: You need an API key to query Subgraphs ### How to Create a Subgraph in Subgraph Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### ग्राफ नेटवर्कसह सबग्राफ सुसंगतता -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/supported-networks/) -- खालीलपैकी कोणतीही वैशिष्ट्ये वापरू नयेत: - - ipfs.cat & ipfs.map - - गैर-घातक त्रुटी - - कलम करणे +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. ## Initialize Your Subgraph -Once your subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -You can find the `` value on your subgraph details page in Subgraph Studio, see image below: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. 
+After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## आलेख प्रमाणीकरण -Before you can deploy your subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your subgraph details page. +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. Then, use the following command to authenticate from the CLI: @@ -91,11 +85,11 @@ graph auth ## Deploying a Subgraph -Once you are ready, you can deploy your subgraph to Subgraph Studio. +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -Use the following CLI command to deploy your subgraph: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ After running this command, the CLI will ask for a version label. ## Testing Your Subgraph -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. +After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## Publish Your Subgraph -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versioning Your Subgraph with the CLI -If you want to update your subgraph, you can do the following: +If you want to update your Subgraph, you can do the following: - You can deploy a new version to Studio using the CLI (it will only be private at this point). - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). 
If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## सबग्राफ आवृत्त्यांचे स्वयंचलित संग्रहण -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your Subgraph in Subgraph Studio. -> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From 1d1f83302763116debaba2aea993e3e6d2f3a63b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:32 -0500 Subject: [PATCH 0866/1789] New translations using-subgraph-studio.mdx (Hindi) --- .../deploying/using-subgraph-studio.mdx | 68 +++++++++---------- 1 file changed, 31 insertions(+), 37 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/hi/subgraphs/developing/deploying/using-subgraph-studio.mdx index 3fa668ee3535..4ab6dece55a9 100644 --- a/website/src/pages/hi/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/hi/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,30 +2,30 @@ title: Deploying Using Subgraph Studio --- -अपने subgraph को Subgraph Studio में डिप्लॉय करना सीखें। +Learn how to deploy your Subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. 
## Subgraph Studio का अवलोकन In Subgraph Studio,आप निम्नलिखित कर सकते हैं: -- आपने बनाए गए subgraphs की सूची देखें -- एक विशेष subgraph की स्थिति को प्रबंधित करें, विवरण देखें और दृश्य रूप में प्रदर्शित करें -- विशिष्ट सबग्राफ के लिए अपनी एपीआई keys बनाएं और प्रबंधित करें +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs - अपने API कुंजी को विशेष डोमेन तक सीमित करें और केवल कुछ Indexers को उनके साथ क्वेरी करने की अनुमति दें -- अपना subgraph बनाएं -- अपने subgraph को The Graph CLI का उपयोग करके डिप्लॉय करें -- अपने 'subgraph' को 'playground' वातावरण में टेस्ट करें -- अपने स्टेजिंग में 'subgraph' को विकास क्वेरी URL का उपयोग करके एकीकृत करें -- अपने subgraph को The Graph Network पर प्रकाशित करें +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network - अपने बिलिंग को प्रबंधित करें ## The Graph CLI स्थापित करें Deploy करने से पहले, आपको The Graph CLI इंस्टॉल करना होगा। -आपको The Graph CLI का उपयोग करने के लिए Node.js(https://nodejs.org/) और आपकी पसंद का पैकेज मैनेजर (npm, yarn या pnpm) स्थापित होना चाहिए। सबसे हालिया (https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI संस्करण की जांच करें। +आपको The Graph CLI का उपयोग करने के लिए Node.js(https://nodejs.org/) और आपकी पसंद का पैकेज मैनेजर (npm, yarn या pnpm) स्थापित होना चाहिए। सबसे हालिया (https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI संस्करण की जांच करें। ### इंस्टॉल करें 'yarn' के साथ @@ -44,10 +44,10 @@ npm install -g @graphprotocol/graph-cli 1. खोलें [Subgraph Studio](https://thegraph.com/studio/). 2. अपने वॉलेट से साइन इन करें। - आप इसे MetaMask, Coinbase Wallet, WalletConnect, या Safe के माध्यम से कर सकते हैं। -3. साइन इन करने के बाद, आपका यूनिक डिप्लॉय की आपकी subgraph विवरण पृष्ठ पर प्रदर्शित होगा। - - Deploy key आपको अपने subgraphs को प्रकाशित करने या अपने API keys और billing को प्रबंधित करने की अनुमति देता है। यह अद्वितीय है लेकिन यदि आपको लगता है कि यह समझौता किया गया है, तो इसे पुनः उत्पन्न किया जा सकता है। +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. -> महत्वपूर्ण: आपको subgraphs को क्वेरी करने के लिए एक API कुंजी की आवश्यकता है +> Important: You need an API key to query Subgraphs ### How to Create a Subgraph in Subgraph Studio @@ -57,31 +57,25 @@ npm install -g @graphprotocol/graph-cli ### ग्राफ नेटवर्क के साथ सबग्राफ अनुकूलता -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/supported-networks/) -- निम्नलिखित सुविधाओं में से किसी का उपयोग नहीं करना चाहिए: - - ipfs.cat & ipfs.map - - गैर-घातक त्रुटियाँ - - ग्राफ्टिंग +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. 
## अपने Subgraph को प्रारंभ करें -एक बार जब आपका subgraph Subgraph Studio में बना दिया गया है, तो आप इस कमांड का उपयोग करके CLI के माध्यम से इसके कोड को प्रारंभ कर सकते हैं: +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: ```bash graph init ``` -आप `` मान को अपने subgraph विवरण पृष्ठ पर Subgraph Studio में पा सकते हैं, नीचे दी गई छवि देखें: +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -`graph init` चलाने के बाद, आपसे संपर्क पता, नेटवर्क, और एक ABI इनपुट करने के लिए कहा जाएगा जिसे आप क्वेरी करना चाहते हैं। यह आपके स्थानीय मशीन पर एक नया फोल्डर उत्पन्न करेगा जिसमें आपके Subgraph पर काम करना शुरू करने के लिए कुछ मूल कोड होगा। आप फिर अपने Subgraph को अंतिम रूप दे सकते हैं ताकि यह सुनिश्चित किया जा सके कि यह अपेक्षित रूप से काम करता है। +After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. ## ग्राफ प्रमाणीकरण -अपने subgraph को Subgraph Studio पर डिप्लॉय करने से पहले, आपको CLI के भीतर अपने खाते में लॉग इन करना होगा। ऐसा करने के लिए, आपको अपना deploy key चाहिए होगा, जिसे आप अपने subgraph विवरण पृष्ठ के तहत पा सकते हैं। +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. फिर, CLI से प्रमाणित करने के लिए निम्नलिखित आदेश का उपयोग करें: @@ -91,11 +85,11 @@ graph auth ## Subgraph डिप्लॉय करना -जब आप तैयार हों, तो आप अपना subgraph को Subgraph Studio पर डिप्लॉय कर सकते हैं। +Once you are ready, you can deploy your Subgraph to Subgraph Studio. -> CLI का उपयोग करके subgraph को डिप्लॉय करना उसे Studio में पुश करता है, जहां आप इसे टेस्ट कर सकते हैं और मेटाडेटा को अपडेट कर सकते हैं। यह क्रिया आपके subgraph को विकेंद्रीकृत नेटवर्क पर प्रकाशित नहीं करेगी। +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. -निम्नलिखित CLI कमांड का उपयोग करके अपना subgraph डिप्लॉय करें: +Use the following CLI command to deploy your Subgraph: ```bash graph deploy @@ -108,30 +102,30 @@ graph deploy ## अपने Subgraph का परीक्षण करें -डिप्लॉय करने के बाद, आप अपने subgraph का परीक्षण कर सकते हैं (या तो Subgraph Studio में या अपने ऐप में, डिप्लॉयमेंट क्वेरी URL के साथ), एक और संस्करण डिप्लॉय करें, मेटाडेटा को अपडेट करें, और जब आप तैयार हों, तो Graph Explorer(https://thegraph.com/explorer) पर प्रकाशित करें। +After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. -Subgraph Studio का उपयोग करके डैशबोर्ड पर लॉग्स की जांच करें और अपने subgraph के साथ किसी भी त्रुटियों की तलाश करें। +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. ## अपने Subgraph को प्रकाशित करें -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). 
+In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## अपने Subgraph को CLI के साथ संस्करण बनाना -यदि आप अपने subgraph को अपडेट करना चाहते हैं, तो आप निम्नलिखित कर सकते हैं: +If you want to update your Subgraph, you can do the following: - आप स्टूडियो में CLI का उपयोग करके एक नया संस्करण डिप्लॉय कर सकते हैं (इस समय यह केवल निजी होगा)। - एक बार जब आप इससे संतुष्ट हो जाएं, तो आप अपने नए डिप्लॉयमेंट को Graph Explorer(https://thegraph.com/explorer). पर प्रकाशित कर सकते हैं। -- यह क्रिया आपके नए संस्करण का निर्माण करेगी जिसे Curators सिग्नल करना शुरू कर सकते हैं और Indexers अनुक्रमित कर सकते हैं। +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## सबग्राफ संस्करणों का स्वचालित संग्रह -जब भी आप Subgraph Studio में एक नया subgraph संस्करण डिप्लॉय करते हैं, तो पिछले संस्करण को आर्काइव कर दिया जाएगा। आर्काइव किए गए संस्करणों को इंडेक्स/सिंक नहीं किया जाएगा और इसलिए उन्हें क्वेरी नहीं किया जा सकता। आप Subgraph Studio में अपने subgraph के आर्काइव किए गए संस्करण को अनआर्काइव कर सकते हैं। +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your Subgraph in Subgraph Studio. -> नोट: स्टूडियो में डिप्लॉय किए गए गैर-प्रकाशित subgraphs के पिछले संस्करणों को स्वचालित रूप से आर्काइव किया जाएगा। +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. 
![Subgraph Studio - Unarchive](/img/Unarchive.png) From 6f7a33e08f7cc34b0718af23cb448d1efeada1e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:33 -0500 Subject: [PATCH 0867/1789] New translations using-subgraph-studio.mdx (Swahili) --- .../deploying/using-subgraph-studio.mdx | 131 ++++++++++++++++++ 1 file changed, 131 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/developing/deploying/using-subgraph-studio.mdx diff --git a/website/src/pages/sw/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/sw/subgraphs/developing/deploying/using-subgraph-studio.mdx new file mode 100644 index 000000000000..77d10212c770 --- /dev/null +++ b/website/src/pages/sw/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -0,0 +1,131 @@ +--- +title: Deploying Using Subgraph Studio +--- + +Learn how to deploy your Subgraph to Subgraph Studio. + +> Note: When you deploy a Subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a Subgraph, you're publishing it onchain. + +## Subgraph Studio Overview + +In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: + +- View a list of Subgraphs you've created +- Manage, view details, and visualize the status of a specific Subgraph +- Create and manage your API keys for specific Subgraphs +- Restrict your API keys to specific domains and allow only certain Indexers to query with them +- Create your Subgraph +- Deploy your Subgraph using The Graph CLI +- Test your Subgraph in the playground environment +- Integrate your Subgraph in staging using the development query URL +- Publish your Subgraph to The Graph Network +- Manage your billing + +## Install The Graph CLI + +Before deploying, you must install The Graph CLI. + +You must have [Node.js](https://nodejs.org/) and a package manager of your choice (`npm`, `yarn` or `pnpm`) installed to use The Graph CLI. Check for the [most recent](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI version. + +### Install with yarn + +```bash +yarn global add @graphprotocol/graph-cli +``` + +### Install with npm + +```bash +npm install -g @graphprotocol/graph-cli +``` + +## Get Started + +1. Open [Subgraph Studio](https://thegraph.com/studio/). +2. Connect your wallet to sign in. + - You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. +3. After you sign in, your unique deploy key will be displayed on your Subgraph details page. + - The deploy key allows you to publish your Subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. + +> Important: You need an API key to query Subgraphs + +### How to Create a Subgraph in Subgraph Studio + + + +> For additional written detail, review the [Quick Start](/subgraphs/quick-start/). + +### Subgraph Compatibility with The Graph Network + +To be supported by Indexers on The Graph Network, Subgraphs must index a [supported network](/supported-networks/). For a full list of supported and unsupported features, check out the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) repo. 
+ +## Initialize Your Subgraph + +Once your Subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: + +```bash +graph init +``` + +You can find the `` value on your Subgraph details page in Subgraph Studio, see image below: + +![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) + +After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your Subgraph. You can then finalize your Subgraph to make sure it works as expected. + +## Graph Auth + +Before you can deploy your Subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your Subgraph details page. + +Then, use the following command to authenticate from the CLI: + +```bash +graph auth +``` + +## Deploying a Subgraph + +Once you are ready, you can deploy your Subgraph to Subgraph Studio. + +> Deploying a Subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your Subgraph to the decentralized network. + +Use the following CLI command to deploy your Subgraph: + +```bash +graph deploy +``` + +After running this command, the CLI will ask for a version label. + +- It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as `v1`, `version1`, or `asdf`. +- The labels you create will be visible in Graph Explorer and can be used by curators to decide if they want to signal on a specific version or not, so choose them wisely. + +## Testing Your Subgraph + +After deploying, you can test your Subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. + +Use Subgraph Studio to check the logs on the dashboard and look for any errors with your Subgraph. + +## Publish Your Subgraph + +In order to publish your Subgraph successfully, review [publishing a Subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). + +## Versioning Your Subgraph with the CLI + +If you want to update your Subgraph, you can do the following: + +- You can deploy a new version to Studio using the CLI (it will only be private at this point). +- Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). +- This action will create a new version of your Subgraph that Curators can start signaling on and Indexers can index. + +You can also update your Subgraph's metadata without publishing a new version. You can update your Subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates Subgraph details in Explorer without having to publish a new version with a new deployment. + +> Note: There are costs associated with publishing a new version of a Subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your Subgraph if Curators have not signaled on it. 
For more information, please read more [here](/resources/roles/curating/). + +## Automatic Archiving of Subgraph Versions + +Whenever you deploy a new Subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your Subgraph in Subgraph Studio. + +> Note: Previous versions of non-published Subgraphs deployed to Studio will be automatically archived. + +![Subgraph Studio - Unarchive](/img/Unarchive.png) From 0e27ce128fc296faa073d0633d8894d1bfed34f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:34 -0500 Subject: [PATCH 0868/1789] New translations developer-faq.mdx (Romanian) --- .../ro/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/developer-faq.mdx b/website/src/pages/ro/subgraphs/developing/developer-faq.mdx index 8dbe6d23ad39..e45141294523 100644 --- a/website/src/pages/ro/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/ro/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ This page summarizes some of the most common questions for developers building o ## Subgraph Related -### 1. What is a subgraph? +### 1. What is a Subgraph? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. What is the first step to create a subgraph? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Can I still create a subgraph if my smart contracts don't have events? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. 
However, this is not recommended, as performance will be significantly slower. -### 4. Can I change the GitHub account associated with my subgraph? +### 4. Can I change the GitHub account associated with my Subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. How do I update a subgraph on mainnet? +### 5. How do I update a Subgraph on mainnet? -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -You have to redeploy the subgraph, but if the subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Not currently, as mappings are written in AssemblyScript. @@ -45,15 +45,15 @@ One possible alternative solution to this is to store raw data in entities and p ### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? -Within a subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. How are templates different from data sources? -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. 
When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Can I delete my subgraph? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Network Related @@ -110,11 +110,11 @@ Yes. Sepolia supports block handlers, call handlers and event handlers. It shoul Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? Yes! Try the following command, substituting "organization/subgraphName" with the organization under it is published and the name of your subgraph: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. 
Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. ## Miscellaneous From 3725d14e24bfa4614eea266ce1d67a6e34c41d7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:35 -0500 Subject: [PATCH 0869/1789] New translations developer-faq.mdx (French) --- .../fr/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/developer-faq.mdx b/website/src/pages/fr/subgraphs/developing/developer-faq.mdx index e2bb16ce90af..b644f56354a8 100644 --- a/website/src/pages/fr/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/fr/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ Cette page résume certaines des questions les plus courantes pour les développ ## Relatif aux Subgraphs -### 1. Qu'est-ce qu'un subgraph ? +### 1. What is a Subgraph? -Un subgraph est une API personnalisée construite sur des données blockchain. Les subgraphs sont interrogés en utilisant le langage de requête GraphQL et sont déployés sur Graph Node en utilisant Graph CLI. Une fois déployés et publiés sur le réseau décentralisé de The Graph, les Indexeurs traitent les subgraphs et les rendent disponibles pour que les consommateurs de subgraphs puissent les interroger. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. Quelle est la première étape pour créer un subgraph ? +### 2. What is the first step to create a Subgraph? -Pour créer un subgraph avec succès, vous devez installer Graph CLI. Consultez le [Démarrage rapide](/subgraphs/quick-start/) pour commencer. Pour des informations détaillées, consultez [Création d'un subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Suis-je toujours en mesure de créer un subgraph si mes smart contracts n'ont pas d'événements ? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -Il est fortement recommandé de structurer vos smart contracts pour avoir des événements associés aux données que vous souhaitez interroger. Les gestionnaires d'événements du subgraph sont déclenchés par des événements de contrat et constituent le moyen le plus rapide de récupérer des données utiles. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -Si les contrats avec lesquels vous travaillez ne contiennent pas d'événements, votre subgraph peut utiliser des gestionnaires d'appels et de blocs pour déclencher l'indexation. Cependant, ceci n'est pas recommandé, car les performances seront nettement plus lentes. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. Puis-je modifier le compte GitHub associé à mon subgraph ? +### 4. 
Can I change the GitHub account associated with my Subgraph? -Non. Une fois un subgraph créé, le compte GitHub associé ne peut pas être modifié. Veuillez vous assurer de bien prendre en compte ce détail avant de créer votre subgraph. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. Comment mettre à jour un subgraph sur le mainnet ? +### 5. How do I update a Subgraph on mainnet? -Vous pouvez déployer une nouvelle version de votre subgraph sur Subgraph Studio en utilisant la CLI. Cette action maintient votre subgraph privé, mais une fois que vous en êtes satisfait, vous pouvez le publier sur Graph Explorer. Cela créera une nouvelle version de votre subgraph sur laquelle les Curateurs pourront commencer à signaler. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Est-il possible de dupliquer un subgraph vers un autre compte ou endpoint sans le redéployer ? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -Vous devez redéployer le subgraph, mais si l'ID de subgraph (hachage IPFS) ne change pas, il n'aura pas à se synchroniser depuis le début. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. Comment puis-je appeler une fonction d'un contrat ou accéder à une variable d'état publique depuis mes mappages de subgraph ? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Jetez un œil à l’état `Accès au contrat intelligent` dans la section [API AssemblyScript](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Puis-je importer `ethers.js` ou d'autres bibliothèques JS dans mes mappages de subgraphs ? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Actuellement non, car les mappages sont écrits en AssemblyScript. @@ -45,15 +45,15 @@ Une solution alternative possible serait de stocker des données brutes dans des ### 9. Lorsqu'on écoute plusieurs contrats, est-il possible de sélectionner l'ordre des contrats pour écouter les événements ? -Dans un subgraph, les événements sont toujours traités dans l'ordre dans lequel ils apparaissent dans les blocs, que ce soit sur plusieurs contrats ou non. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. En quoi les modèles sont-ils différents des sources de données ? -Les modèles vous permettent de créer rapidement des sources de données , pendant que votre subgraph est en cours d'indexation. Votre contrat peut générer de nouveaux contrats à mesure que les gens interagissent avec lui. Étant donné que vous connaissez la structure de ces contrats (ABI, événements, etc.) à l'avance, vous pouvez définir comment vous souhaitez les indexer dans un modèle. Lorsqu'ils sont générés, votre subgraph créera une source de données dynamique en fournissant l'adresse du contrat. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. 
Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Consultez la section "Instanciation d'un modèle de source de données" sur : [Modèles de sources de données](/developing/creating-a-subgraph/#data-source-templates). -### 11. Est-il possible de configurer un subgraph en utilisant `graph init` à partir de `graph-cli` avec deux contrats ? Ou dois-je ajouter manuellement une autre source de données dans `subgraph.yaml` après avoir lancé `graph init` ? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Oui. Dans la commande `graph init` elle-même, vous pouvez ajouter plusieurs sources de données en entrant des contrats l'un après l'autre. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:dernier Si une seule entité est créée pendant l'événement et s'il n'y a rien de mieux disponible, alors le hash de la transaction + l'index du journal seront uniques. Vous pouvez les obscurcir en les convertissant en Bytes et en les faisant passer par `crypto.keccak256`, mais cela ne les rendra pas plus uniques. -### 15. Puis-je supprimer mon subgraph ? +### 15. Can I delete my Subgraph? -Oui, vous pouvez [supprimer](/subgraphs/developing/managing/deleting-a-subgraph/) et [transférer](/subgraphs/developing/managing/transferring-a-subgraph/) votre subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Relatif au Réseau @@ -110,11 +110,11 @@ Oui. Sepolia prend en charge les gestionnaires de blocs, les gestionnaires d'app Oui. `dataSources.source.startBlock` dans le fichier `subgraph.yaml` spécifie le numéro du bloc à partir duquel la source de données commence l'indexation. Dans la plupart des cas, nous suggérons d'utiliser le bloc où le contrat a été créé : [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. Quels sont quelques conseils pour augmenter les performances d'indexation? Mon subgraph prend beaucoup de temps à se synchroniser +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Oui, vous devriez jeter un coup d'œil à la fonctionnalité optionnelle de bloc de démarrage pour commencer l'indexation à partir du bloc où le contrat a été déployé : [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Existe-t-il un moyen d'interroger directement le subgraph pour déterminer le dernier numéro de bloc qu'il a indexé? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? Oui ! Essayez la commande suivante, en remplaçant "organization/subgraphName" par l'organisation sous laquelle elle est publiée et le nom de votre subgraphe : @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. Si mon application décentralisée (dapp) utilise The Graph pour effectuer des requêtes, dois-je écrire ma clé API directement dans le code du frontend ? Et si nous payons les frais de requête pour les utilisateurs – des utilisateurs malveillants pourraient-ils faire augmenter considérablement nos frais de requête ? 
-Actuellement, l'approche recommandée pour une dapp est d'ajouter la clé au frontend et de l'exposer aux utilisateurs finaux. Cela dit, vous pouvez limiter cette clé à un nom d'hôte, comme _yourdapp.io_ et subgraph. La passerelle est actuellement gérée par Edge & Node. Une partie de la responsabilité d'une passerelle est de surveiller les comportements abusifs et de bloquer le trafic des clients malveillants. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. ## Divers From 31e61fe76448df06cb6d121502d3476cb15abbd6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:36 -0500 Subject: [PATCH 0870/1789] New translations developer-faq.mdx (Spanish) --- .../es/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/developer-faq.mdx b/website/src/pages/es/subgraphs/developing/developer-faq.mdx index 0a3bad37fd09..6bf2d3eb2199 100644 --- a/website/src/pages/es/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/es/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ This page summarizes some of the most common questions for developers building o ## Subgraph Related -### 1. ¿Qué es un subgrafo? +### 1. What is a Subgraph? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. What is the first step to create a subgraph? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Can I still create a subgraph if my smart contracts don't have events? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. 
-If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. ¿Puedo cambiar la cuenta de GitHub asociada con mi subgrafo? +### 4. Can I change the GitHub account associated with my Subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. How do I update a subgraph on mainnet? +### 5. How do I update a Subgraph on mainnet? -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -Tienes que volver a realizar el deploy del subgrafo, pero si el ID del subgrafo (hash IPFS) no cambia, no tendrá que sincronizarse desde el principio. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Not currently, as mappings are written in AssemblyScript. @@ -45,15 +45,15 @@ One possible alternative solution to this is to store raw data in entities and p ### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? -Dentro de un subgrafo, los eventos se procesan siempre en el orden en que aparecen en los bloques, independientemente de que sea a través de múltiples contratos o no. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. How are templates different from data sources? -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. 
When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Can I delete my subgraph? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Network Related @@ -110,11 +110,11 @@ Yes. Sepolia supports block handlers, call handlers and event handlers. It shoul Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? ¡Sí es posible! Prueba el siguiente comando, sustituyendo "organization/subgraphName" por la organización bajo la que se publica y el nombre de tu subgrafo: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. 
The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. ## Miscellaneous From af46526f2f086dd22a9991125f1429d5543abdb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:37 -0500 Subject: [PATCH 0871/1789] New translations developer-faq.mdx (Arabic) --- .../ar/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/developer-faq.mdx b/website/src/pages/ar/subgraphs/developing/developer-faq.mdx index f0e9ba0cd865..016a7a8e5a04 100644 --- a/website/src/pages/ar/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/ar/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ This page summarizes some of the most common questions for developers building o ## Subgraph Related -### 1. What is a subgraph? +### 1. What is a Subgraph? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. What is the first step to create a subgraph? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Can I still create a subgraph if my smart contracts don't have events? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. 
However, this is not recommended, as performance will be significantly slower. -### 4. Can I change the GitHub account associated with my subgraph? +### 4. Can I change the GitHub account associated with my Subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. How do I update a subgraph on mainnet? +### 5. How do I update a Subgraph on mainnet? -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -يجب عليك إعادة نشر ال الفرعيةرسم بياني ، ولكن إذا لم يتغير الفرعيةرسم بياني (ID (IPFS hash ، فلن يضطر إلى المزامنة من البداية. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Not currently, as mappings are written in AssemblyScript. @@ -45,15 +45,15 @@ One possible alternative solution to this is to store raw data in entities and p ### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? -ضمن ال Subgraph ، تتم معالجة الأحداث دائمًا بالترتيب الذي تظهر به في الكتل ، بغض النظر عما إذا كان ذلك عبر عقود متعددة أم لا. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. How are templates different from data sources? -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. 
When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Can I delete my subgraph? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Network Related @@ -110,11 +110,11 @@ Yes. Sepolia supports block handlers, call handlers and event handlers. It shoul Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? نعم! جرب الأمر التالي ، مع استبدال "Organization / subgraphName" بالمؤسسة واسم الـ subgraph الخاص بك: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. 
Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. ## Miscellaneous From e71ff12cfb138ceaf5f09db87a6ef89e1652bcce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:38 -0500 Subject: [PATCH 0872/1789] New translations developer-faq.mdx (Czech) --- .../cs/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/developer-faq.mdx b/website/src/pages/cs/subgraphs/developing/developer-faq.mdx index e07a7f06fb48..2c5d8903c4d9 100644 --- a/website/src/pages/cs/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/cs/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ This page summarizes some of the most common questions for developers building o ## Subgraph Related -### 1. Co je to podgraf? +### 1. What is a Subgraph? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. What is the first step to create a subgraph? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Can I still create a subgraph if my smart contracts don't have events? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. Mohu změnit účet GitHub přidružený k mému podgrafu? +### 4. Can I change the GitHub account associated with my Subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. +No. 
Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. How do I update a subgraph on mainnet? +### 5. How do I update a Subgraph on mainnet? -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -Podgraf musíte znovu nasadit, ale pokud se ID podgrafu (hash IPFS) nezmění, nebude se muset synchronizovat od začátku. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Not currently, as mappings are written in AssemblyScript. @@ -45,15 +45,15 @@ One possible alternative solution to this is to store raw data in entities and p ### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? -V rámci podgrafu se události zpracovávají vždy v pořadí, v jakém se objevují v blocích, bez ohledu na to, zda se jedná o více smluv. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. How are templates different from data sources? -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. 
Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Can I delete my subgraph? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Network Related @@ -110,11 +110,11 @@ Yes. Sepolia supports block handlers, call handlers and event handlers. It shoul Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? Ano! Vyzkoušejte následující příkaz, přičemž "organization/subgraphName" nahraďte názvem organizace, pod kterou je publikován, a názvem vašeho podgrafu: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. 
## Miscellaneous From 8430fd3107ceac21b6b74999ffc30f821f5e22f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:39 -0500 Subject: [PATCH 0873/1789] New translations developer-faq.mdx (German) --- .../de/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/developer-faq.mdx b/website/src/pages/de/subgraphs/developing/developer-faq.mdx index 8dbe6d23ad39..e45141294523 100644 --- a/website/src/pages/de/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/de/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ This page summarizes some of the most common questions for developers building o ## Subgraph Related -### 1. What is a subgraph? +### 1. What is a Subgraph? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. What is the first step to create a subgraph? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Can I still create a subgraph if my smart contracts don't have events? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. Can I change the GitHub account associated with my subgraph? +### 4. Can I change the GitHub account associated with my Subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. 
How do I update a subgraph on mainnet? +### 5. How do I update a Subgraph on mainnet? -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -You have to redeploy the subgraph, but if the subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Not currently, as mappings are written in AssemblyScript. @@ -45,15 +45,15 @@ One possible alternative solution to this is to store raw data in entities and p ### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? -Within a subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. How are templates different from data sources? -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? 
Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Can I delete my subgraph? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Network Related @@ -110,11 +110,11 @@ Yes. Sepolia supports block handlers, call handlers and event handlers. It shoul Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? Yes! Try the following command, substituting "organization/subgraphName" with the organization under it is published and the name of your subgraph: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. 
## Miscellaneous From 6d38a180f8b2b21b3d6cbc3a2cc2bc6b843518a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:40 -0500 Subject: [PATCH 0874/1789] New translations developer-faq.mdx (Italian) --- .../it/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/developer-faq.mdx b/website/src/pages/it/subgraphs/developing/developer-faq.mdx index 8dbe6d23ad39..e45141294523 100644 --- a/website/src/pages/it/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/it/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ This page summarizes some of the most common questions for developers building o ## Subgraph Related -### 1. What is a subgraph? +### 1. What is a Subgraph? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. What is the first step to create a subgraph? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Can I still create a subgraph if my smart contracts don't have events? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. Can I change the GitHub account associated with my subgraph? +### 4. Can I change the GitHub account associated with my Subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. 
How do I update a subgraph on mainnet? +### 5. How do I update a Subgraph on mainnet? -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -You have to redeploy the subgraph, but if the subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Not currently, as mappings are written in AssemblyScript. @@ -45,15 +45,15 @@ One possible alternative solution to this is to store raw data in entities and p ### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? -Within a subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. How are templates different from data sources? -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? 
Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Can I delete my subgraph? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Network Related @@ -110,11 +110,11 @@ Yes. Sepolia supports block handlers, call handlers and event handlers. It shoul Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? Yes! Try the following command, substituting "organization/subgraphName" with the organization under it is published and the name of your subgraph: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. 
## Miscellaneous From a8b6e2c94e8c8fe9a082e96662f7c6d0e83708cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:41 -0500 Subject: [PATCH 0875/1789] New translations developer-faq.mdx (Japanese) --- .../ja/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/developer-faq.mdx b/website/src/pages/ja/subgraphs/developing/developer-faq.mdx index 9744d7d9a53d..54a9d8b3a865 100644 --- a/website/src/pages/ja/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/ja/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ This page summarizes some of the most common questions for developers building o ## Subgraph Related -### 1. サブグラフとは +### 1. What is a Subgraph? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. What is the first step to create a subgraph? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Can I still create a subgraph if my smart contracts don't have events? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. サブグラフに関連付けられている GitHub アカウントを変更できますか? +### 4. Can I change the GitHub account associated with my Subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. How do I update a subgraph on mainnet? 
+### 5. How do I update a Subgraph on mainnet? -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -サブグラフを再デプロイする必要がありますが、サブグラフの ID(IPFS ハッシュ)が変わらなければ、最初から同期する必要はありません。 +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Not currently, as mappings are written in AssemblyScript. @@ -45,15 +45,15 @@ One possible alternative solution to this is to store raw data in entities and p ### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? -サブグラフ内では、複数のコントラクトにまたがっているかどうかにかかわらず、イベントは常にブロックに表示される順序で処理されます。 +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. How are templates different from data sources? -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. 
@@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Can I delete my subgraph? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Network Related @@ -110,11 +110,11 @@ Yes. Sepolia supports block handlers, call handlers and event handlers. It shoul Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? はい、あります。organization/subgraphName」を公開先の組織とサブグラフの名前に置き換えて、以下のコマンドを実行してみてください: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. 
## Miscellaneous From a53a2593c98242269cc3cf293dd015a6c46cb3f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:42 -0500 Subject: [PATCH 0876/1789] New translations developer-faq.mdx (Korean) --- .../ko/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/developer-faq.mdx b/website/src/pages/ko/subgraphs/developing/developer-faq.mdx index 8dbe6d23ad39..e45141294523 100644 --- a/website/src/pages/ko/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/ko/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ This page summarizes some of the most common questions for developers building o ## Subgraph Related -### 1. What is a subgraph? +### 1. What is a Subgraph? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. What is the first step to create a subgraph? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Can I still create a subgraph if my smart contracts don't have events? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. Can I change the GitHub account associated with my subgraph? +### 4. Can I change the GitHub account associated with my Subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. 
How do I update a subgraph on mainnet? +### 5. How do I update a Subgraph on mainnet? -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -You have to redeploy the subgraph, but if the subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Not currently, as mappings are written in AssemblyScript. @@ -45,15 +45,15 @@ One possible alternative solution to this is to store raw data in entities and p ### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? -Within a subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. How are templates different from data sources? -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? 
Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Can I delete my subgraph? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Network Related @@ -110,11 +110,11 @@ Yes. Sepolia supports block handlers, call handlers and event handlers. It shoul Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? Yes! Try the following command, substituting "organization/subgraphName" with the organization under it is published and the name of your subgraph: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. 
## Miscellaneous From 593ae46032bed4736d97288a0b9cfda4c305774c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:43 -0500 Subject: [PATCH 0877/1789] New translations developer-faq.mdx (Dutch) --- .../nl/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/developer-faq.mdx b/website/src/pages/nl/subgraphs/developing/developer-faq.mdx index 8dbe6d23ad39..e45141294523 100644 --- a/website/src/pages/nl/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/nl/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ This page summarizes some of the most common questions for developers building o ## Subgraph Related -### 1. What is a subgraph? +### 1. What is a Subgraph? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. What is the first step to create a subgraph? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Can I still create a subgraph if my smart contracts don't have events? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. Can I change the GitHub account associated with my subgraph? +### 4. Can I change the GitHub account associated with my Subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. 
How do I update a subgraph on mainnet? +### 5. How do I update a Subgraph on mainnet? -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -You have to redeploy the subgraph, but if the subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Not currently, as mappings are written in AssemblyScript. @@ -45,15 +45,15 @@ One possible alternative solution to this is to store raw data in entities and p ### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? -Within a subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. How are templates different from data sources? -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? 
Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Can I delete my subgraph? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Network Related @@ -110,11 +110,11 @@ Yes. Sepolia supports block handlers, call handlers and event handlers. It shoul Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? Yes! Try the following command, substituting "organization/subgraphName" with the organization under it is published and the name of your subgraph: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. 
## Miscellaneous From 1750f7e10d137c3de3e879b7dabc49ca1d343661 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:44 -0500 Subject: [PATCH 0878/1789] New translations developer-faq.mdx (Polish) --- .../pl/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/developer-faq.mdx b/website/src/pages/pl/subgraphs/developing/developer-faq.mdx index 8dbe6d23ad39..e45141294523 100644 --- a/website/src/pages/pl/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/pl/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ This page summarizes some of the most common questions for developers building o ## Subgraph Related -### 1. What is a subgraph? +### 1. What is a Subgraph? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. What is the first step to create a subgraph? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Can I still create a subgraph if my smart contracts don't have events? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. Can I change the GitHub account associated with my subgraph? +### 4. Can I change the GitHub account associated with my Subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. 
How do I update a subgraph on mainnet? +### 5. How do I update a Subgraph on mainnet? -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -You have to redeploy the subgraph, but if the subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Not currently, as mappings are written in AssemblyScript. @@ -45,15 +45,15 @@ One possible alternative solution to this is to store raw data in entities and p ### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? -Within a subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. How are templates different from data sources? -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? 
Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Can I delete my subgraph? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Network Related @@ -110,11 +110,11 @@ Yes. Sepolia supports block handlers, call handlers and event handlers. It shoul Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? Yes! Try the following command, substituting "organization/subgraphName" with the organization under it is published and the name of your subgraph: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. 
## Miscellaneous From 6d72f65360379897437a639f0f37561fce15401d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:45 -0500 Subject: [PATCH 0879/1789] New translations developer-faq.mdx (Portuguese) --- .../pt/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/developer-faq.mdx b/website/src/pages/pt/subgraphs/developing/developer-faq.mdx index 94f963a2fa3a..f5d39bef765f 100644 --- a/website/src/pages/pt/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/pt/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ This page summarizes some of the most common questions for developers building o ## Subgraph Related -### 1. O que é um subgraph? +### 1. What is a Subgraph? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. What is the first step to create a subgraph? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Can I still create a subgraph if my smart contracts don't have events? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. Posso mudar a conta do GitHub associada ao meu subgraph? +### 4. Can I change the GitHub account associated with my Subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. 
How do I update a subgraph on mainnet? +### 5. How do I update a Subgraph on mainnet? -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -Deve relançar o subgraph, mas se a ID do subgraph (hash IPFS) não mudar, ele não precisará sincronizar do começo. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Not currently, as mappings are written in AssemblyScript. @@ -45,15 +45,15 @@ One possible alternative solution to this is to store raw data in entities and p ### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? -Dentro de um subgraph, os eventos são sempre processados na ordem em que aparecem nos blocos, mesmo sendo ou não através de vários contratos. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. How are templates different from data sources? -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? 
Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Can I delete my subgraph? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Network Related @@ -110,11 +110,11 @@ Sim. O Sepolia apoia handlers de blocos, chamadas e eventos. Vale notar que hand Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? Sim! Execute o seguinte comando, com "organization/subgraphName" substituído com a organização sob a qual ele foi publicado e o nome do seu subgraph: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. 
## Miscellaneous From 2864269df0a5fa4a25a7465562346eb86a0018f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:46 -0500 Subject: [PATCH 0880/1789] New translations developer-faq.mdx (Russian) --- .../ru/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/developer-faq.mdx b/website/src/pages/ru/subgraphs/developing/developer-faq.mdx index 4c5aa00bf9cf..aa95b0395016 100644 --- a/website/src/pages/ru/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/ru/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ sidebarTitle: FAQ ## Вопросы, связанные с субграфом -### 1. Что такое субграф? +### 1. What is a Subgraph? -Субграф - это пользовательский API, построенный на данных блокчейна. Субграфы запрашиваются с использованием языка запросов GraphQL и развертываются на Graph Node с помощью Graph CLI. После развертывания и публикации в децентрализованной сети The Graph индексаторы обрабатывают субграфы и делают их доступными для запросов потребителей субграфов. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. Каков первый шаг в создании субграфа? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Могу ли я создать субграф, если в моих смарт-контрактах нет событий? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -Настоятельно рекомендуется структурировать смарт-контракты так, чтобы они содержали события, связанные с данными, которые вы хотите запросить. Обработчики событий в субграфе срабатывают на события контракта и являются самым быстрым способом получения нужных данных. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -Если контракты, с которыми Вы работаете, не содержат событий, Ваш субграф может использовать обработчики вызовов и блоков для запуска индексации. Хотя это не рекомендуется, так как производительность будет существенно ниже. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. Могу ли я изменить учетную запись GitHub, связанную с моим субграфом? +### 4. Can I change the GitHub account associated with my Subgraph? -Нет. После создания субграфа связанная с ним учетная запись GitHub не может быть изменена. Пожалуйста, учтите это перед созданием субграфа. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. 
-### 5. Как обновить субграф в майннете? +### 5. How do I update a Subgraph on mainnet? -Вы можете развернуть новую версию своего субграфа в Subgraph Studio с помощью интерфейса командной строки (CLI). Это действие сохраняет конфиденциальность вашего субграфа, но, если результат Вас удовлетворит, Вы сможете опубликовать его в Graph Explorer. При этом будет создана новая версия Вашего субграфа, на которую Кураторы смогут начать подавать сигналы. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Можно ли дублировать субграф на другую учетную запись или конечную точку без повторного развертывания? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -Вы должны повторно развернуть субграф, но если идентификатор субграфа (хэш IPFS) не изменится, его не нужно будет синхронизировать с самого начала. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. Как вызвать контрактную функцию или получить доступ к публичной переменной состояния из моих мэппингов субграфа? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? В настоящее время нет, так как мэппинги написаны на языке AssemblyScript. @@ -45,15 +45,15 @@ Take a look at `Access to smart contract` state inside the section [AssemblyScri ### 9. При прослушивании нескольких контрактов, возможно ли выбрать порядок прослушивания событий контрактов? -Внутри субграфа события всегда обрабатываются в том порядке, в котором они появляются в блоках, независимо от того, относится ли это к нескольким контрактам или нет. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. Чем шаблоны отличаются от источников данных? -Шаблоны позволяют Вам быстро создавать источники данных, пока Ваш субграф индексируется. Ваш контракт может создавать новые контракты по мере того, как люди будут с ним взаимодействовать. Поскольку форма этих контрактов (ABI, события и т. д.) известна заранее, Вы сможете определить, как Вы хотите индексировать их в шаблоне. Когда они будут сгенерированы, Ваш субграф создаст динамический источник данных, предоставив адрес контракта. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? 
Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Могу ли я удалить свой субграф? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Вопросы, связанный с сетью @@ -110,11 +110,11 @@ dataSource.address() Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. Есть ли какие-либо советы по увеличению производительности индексирования? Синхронизация моего субграфа занимает очень много времени +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Есть ли способ напрямую запросить субграф, чтобы определить номер последнего проиндексированного блока? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? Да! Попробуйте выполнить следующую команду, заменив "organization/subgraphName" на название организации, под которой она опубликована, и имя Вашего субграфа: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. 
## Прочее From bd08494a2ec46cfe731a8812614608ab68b03330 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:47 -0500 Subject: [PATCH 0881/1789] New translations developer-faq.mdx (Swedish) --- .../sv/subgraphs/developing/developer-faq.mdx | 48 +++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/developer-faq.mdx b/website/src/pages/sv/subgraphs/developing/developer-faq.mdx index 347f3caa9805..36942bf1dce7 100644 --- a/website/src/pages/sv/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/sv/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ This page summarizes some of the most common questions for developers building o ## Subgraph Related -### 1. Vad är en subgraf? +### 1. What is a Subgraph? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. What is the first step to create a subgraph? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Can I still create a subgraph if my smart contracts don't have events? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. Kan jag ändra det GitHub-konto som är kopplat till min subgraf? +### 4. Can I change the GitHub account associated with my Subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. 
How do I update a subgraph on mainnet? +### 5. How do I update a Subgraph on mainnet? -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -Du måste distribuera om subgrafen, men om subgrafens ID (IPFS-hash) inte ändras behöver den inte synkroniseras från början. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Not currently, as mappings are written in AssemblyScript. @@ -45,15 +45,15 @@ One possible alternative solution to this is to store raw data in entities and p ### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? -Inom en subgraf behandlas händelser alltid i den ordning de visas i blocken, oavsett om det är över flera kontrakt eller inte. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. How are templates different from data sources? -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. 
On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Can I delete my subgraph? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Network Related @@ -94,7 +94,7 @@ You can find the list of the supported networks [here](/supported-networks/). Yes. You can do this by importing `graph-ts` as per the example below: ```javascript -import { dataSource } from '@graphprotocol/graph-ts' +import { dataSource } from "@graphprotocol/graph-ts" dataSource.network() dataSource.address() @@ -110,11 +110,11 @@ Yes. Sepolia supports block handlers, call handlers and event handlers. It shoul Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? Ja! Prova följande kommando och ersätt "organization/subgraphName" med organisationen under vilken den är publicerad och namnet på din subgraf: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. 
## Miscellaneous From 04ad5c7edc300fdd61df606c8cd02419dcff2836 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:48 -0500 Subject: [PATCH 0882/1789] New translations developer-faq.mdx (Turkish) --- .../tr/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/developer-faq.mdx b/website/src/pages/tr/subgraphs/developing/developer-faq.mdx index d464a0058dfb..ecb2f24b39e2 100644 --- a/website/src/pages/tr/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/tr/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ Bu sayfa, The Graph üzerinde geliştirme yapan geliştiricilerin sunduğu en ya ## Subgraph ile İlgili Sorular -### 1. Subgraph nedir? +### 1. What is a Subgraph? -Bir subgraph, blokzinciri verilerine dayalı olarak oluşturulmuş özel yapım bir API’dir. Subgraph'ler, GraphQL sorgu dili kullanılarak sorgulanır ve The Graph CLI kullanılarak bir Graph Düğümü'nde yayına alınır. Dağıtılıp The Graph’in merkeziyetsiz ağına yayımlandığında, Endeksleyiciler subgraph'leri işler ve sorgu yapmaları için kullanıcıların erişimine sunar. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. Subgraph oluşturmanın ilk adımı nedir? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Akıllı sözleşmelerim olay içermiyorsa yine de subgraph oluşturabilir miyim? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -Akıllı sözleşmelerinizi, sorgulamak istediğiniz verilerle ilişkili olaylara sahip olacak şekilde yapılandırmanız şiddetle önerilir. Subgraph içindeki olay işleyicileri sözleşme olayları tarafından tetiklenir ve kullanışlı verilere erişmenin en hızlı yolu bu işleyicileri kullanmaktır. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -Eğer çalıştığınız sözleşmeler olay içermiyorsa, subgraph’inizin endekslenmesini çağrı ve blok işleyicileri kullanarak tetikleyebilirsiniz. Ancak bu tavsiye edilmeyen bir yöntemdir ve performansı önemli ölçüde yavaşlatacaktır. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. Subgraph'ımla ilişkili GitHub hesabını değiştirebilir miyim? +### 4. Can I change the GitHub account associated with my Subgraph? -Hayır. Bir subgraph oluşturulduktan sonra, ilişkili GitHub hesabı değiştirilemez. Bu nedenle, subgraph oluşturmadan önce bunu dikkatlice düşünmelisiniz. +No. 
Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. Mainnet'teki bir subgraph nasıl güncellenir? +### 5. How do I update a Subgraph on mainnet? -CLI'yi kullanarak Subgraph Studio’ya yeni bir subgraph sürümü dağıtabilirsiniz. Bu işlem subgraph’inizi gizli olarak tutar, ancak memnun kaldığınızda Graph Gezgini’nde yayımlayabilirsiniz. Bu, Küratörlerin sinyal vermeye başlayabileceği yeni bir subgraph sürümü oluşturur. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Bir subgraph’i yeniden dağıtmadan başka bir hesaba veya uç noktaya kopyalayabilir miyim? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -Subgraph’i yeniden dağıtmanız gerekir ancak subgraph ID'si (IPFS hash’i) değişmezse, senkronizasyona baştan başlamanıza gerek kalmaz. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. Subgraph eşlemelerinden sözleşme fonksiyonunu nasıl çağırabilir veya bir genel durum değişkenine nasıl erişebilirim? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Eşleyiciler AssemblyScript ile yazıldığından dolayı şu anda mümkün değil. @@ -45,15 +45,15 @@ Bunun alternatif bir çözümü, verileri varlıklarda ham halde depolayıp, JS ### 9. Birden fazla sözleşmeyi dinlerken, olayları dinlenecek sözleşmelerin sırasını seçmek mümkün müdür? -Bir subgraph içindeki olaylar, birden fazla sözleşme üzerinde olup olmamaya bakmaksızın her zaman bloklarda göründükleri sırayla işlenir. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. Şablonlar veri kaynaklarından ne açıdan farklıdır? -Şablonlar, subgraph’iniz endeksleme yaparken veri kaynaklarını hızlıca oluşturmanızı sağlar. Sözleşmeniz, kullanıcılar etkileşime girdikçe yeni sözleşmeler yaratabilir. Bu sözleşmelerin yapısını (ABI, olaylar vb.) önceden bildiğinizden, onları nasıl endekslemek istediğinizi bir şablonda tanımlayabilirsiniz. Yeni sözleşmeler oluşturulduğunda, subgraph’iniz sözleşme adresini tespit ederek dinamik bir veri kaynağı oluşturacaktır. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? 
Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Subgraph'imi silebilir miyim? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Ağ ile İlgili Sorular @@ -110,11 +110,11 @@ Evet. Sepolia, blok işleyicileri, çağrı işleyicileri ve olay işleyicilerin Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. Endeksleme performansını artırmak için ipuçları var mı? Subgraph'imin senkronize edilmesi çok uzun zaman alıyor +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Subgraph üzerinde doğrudan sorgulama yaparak endekslenmiş en son blok numarasını öğrenmenin bir yolu var mı? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? Var! Aşağıdaki komutu, "organization/subgraphName" kısmına subgraph'inizi yayımladığınız organizasyon adını ve subgraph'inizin adını koyarak deneyin: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. 
## Diğer From 28044d345581613556181ad648a542bb28fe45a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:49 -0500 Subject: [PATCH 0883/1789] New translations developer-faq.mdx (Ukrainian) --- .../uk/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/developer-faq.mdx b/website/src/pages/uk/subgraphs/developing/developer-faq.mdx index 8dbe6d23ad39..e45141294523 100644 --- a/website/src/pages/uk/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/uk/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ This page summarizes some of the most common questions for developers building o ## Subgraph Related -### 1. What is a subgraph? +### 1. What is a Subgraph? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. What is the first step to create a subgraph? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Can I still create a subgraph if my smart contracts don't have events? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. Can I change the GitHub account associated with my subgraph? +### 4. Can I change the GitHub account associated with my Subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. 
How do I update a subgraph on mainnet? +### 5. How do I update a Subgraph on mainnet? -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -You have to redeploy the subgraph, but if the subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Not currently, as mappings are written in AssemblyScript. @@ -45,15 +45,15 @@ One possible alternative solution to this is to store raw data in entities and p ### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? -Within a subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. How are templates different from data sources? -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? 
Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Can I delete my subgraph? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Network Related @@ -110,11 +110,11 @@ Yes. Sepolia supports block handlers, call handlers and event handlers. It shoul Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? Yes! Try the following command, substituting "organization/subgraphName" with the organization under it is published and the name of your subgraph: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. 
## Miscellaneous From 4ceb80eccd73f5e8dad829c74ebf3356cab0fc0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:51 -0500 Subject: [PATCH 0884/1789] New translations developer-faq.mdx (Chinese Simplified) --- .../zh/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/developer-faq.mdx b/website/src/pages/zh/subgraphs/developing/developer-faq.mdx index dab117b8f2b5..919b0142a953 100644 --- a/website/src/pages/zh/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/zh/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ This page summarizes some of the most common questions for developers building o ## Subgraph Related -### 什么是子图? +### 1. What is a Subgraph? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. What is the first step to create a subgraph? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Can I still create a subgraph if my smart contracts don't have events? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. 我可以更改与我的子图关联的 GitHub 账户吗? +### 4. Can I change the GitHub account associated with my Subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. How do I update a subgraph on mainnet? 
+### 5. How do I update a Subgraph on mainnet? -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -您必须重新部署子图,但如果子图 ID(IPFS hash)没有更改,则不必从头开始同步。 +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Not currently, as mappings are written in AssemblyScript. @@ -45,15 +45,15 @@ One possible alternative solution to this is to store raw data in entities and p ### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? -在子图中,无论是否跨多个合约,事件始终按照它们在区块中出现的顺序进行处理的。 +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. How are templates different from data sources? -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. 
@@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Can I delete my subgraph? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Network Related @@ -110,11 +110,11 @@ Yes. Sepolia supports block handlers, call handlers and event handlers. It shoul Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? 是的! 请尝试以下命令,并将“organization/subgraphName”替换为发布的组织和子图名称: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. 
## Miscellaneous From 8dcd5dc94571e6673a6d01fc40e92b166358a8eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:52 -0500 Subject: [PATCH 0885/1789] New translations developer-faq.mdx (Urdu (Pakistan)) --- .../ur/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/developer-faq.mdx b/website/src/pages/ur/subgraphs/developing/developer-faq.mdx index ca250f41cc35..955c825816d1 100644 --- a/website/src/pages/ur/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/ur/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ This page summarizes some of the most common questions for developers building o ## Subgraph Related -### سب گراف کیا ہے؟ +### 1. What is a Subgraph? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. What is the first step to create a subgraph? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Can I still create a subgraph if my smart contracts don't have events? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. کیا میں گٹ ہب کا اکاونٹ بدل سکتا ہوں جو میرے سب گراف کے ساتھ وابستہ ہے؟ +### 4. Can I change the GitHub account associated with my Subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. 
-### 5. How do I update a subgraph on mainnet? +### 5. How do I update a Subgraph on mainnet? -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -آپ کو سب گراف کو دوبارہ تعینات کرنا ہوگا، لیکن اگر سب گراف ID (IPFS ہیش) تبدیل نہیں ہوتا ہے، تو اسے شروع سے مطابقت پذیر نہیں ہونا پڑے گا. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Not currently, as mappings are written in AssemblyScript. @@ -45,15 +45,15 @@ One possible alternative solution to this is to store raw data in entities and p ### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? -سب گراف کے اندر، ایونٹس کو ہمیشہ اسی ترتیب سے پروسیس کیا جاتا ہے جس ترتیب سے وہ بلاکس میں ظاہر ہوتے ہیں، قطع نظر اس کے کہ یہ متعدد کنٹریکٹس میں ہے یا نہیں. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. How are templates different from data sources? -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? 
Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Can I delete my subgraph? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Network Related @@ -110,11 +110,11 @@ Yes. Sepolia supports block handlers, call handlers and event handlers. It shoul Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? جی ہاں! مندرجہ ذیل کمانڈ کو آزمائیں، "تنظیم/سب گراف نام" کو اس کے تحت شائع ہونے والی تنظیم کے ساتھ تبدیل کرتے ہوئے اور آپ کے سب گراف کا نام: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. 
## Miscellaneous From 3c0a10e822bb4b1f0b3f2ebf4e0bc911a98a1f21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:53 -0500 Subject: [PATCH 0886/1789] New translations developer-faq.mdx (Vietnamese) --- .../vi/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/developer-faq.mdx b/website/src/pages/vi/subgraphs/developing/developer-faq.mdx index 867c704194ab..66fbae4a568e 100644 --- a/website/src/pages/vi/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/vi/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ This page summarizes some of the most common questions for developers building o ## Subgraph Related -### 1. What is a subgraph? +### 1. What is a Subgraph? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. What is the first step to create a subgraph? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Can I still create a subgraph if my smart contracts don't have events? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. Can I change the GitHub account associated with my subgraph? +### 4. Can I change the GitHub account associated with my Subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. 
How do I update a subgraph on mainnet? +### 5. How do I update a Subgraph on mainnet? -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -Bạn phải triển khai lại subgraph, nhưng nếu ID subgraph (mã băm IPFS) không thay đổi, nó sẽ không phải đồng bộ hóa từ đầu. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Not currently, as mappings are written in AssemblyScript. @@ -45,15 +45,15 @@ One possible alternative solution to this is to store raw data in entities and p ### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? -Trong một subgraph, các sự kiện luôn được xử lý theo thứ tự chúng xuất hiện trong các khối, bất kể điều đó có qua nhiều hợp đồng hay không. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. How are templates different from data sources? -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? 
Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Can I delete my subgraph? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Network Related @@ -110,11 +110,11 @@ Yes. Sepolia supports block handlers, call handlers and event handlers. It shoul Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? Có! Hãy thử lệnh sau, thay thế "organization/subgraphName" bằng tổ chức dưới nó được xuất bản và tên của subgraph của bạn: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. 
## Miscellaneous From 75b7c9191ce21f2fccc6fe519ba01db23079a6ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:54 -0500 Subject: [PATCH 0887/1789] New translations developer-faq.mdx (Marathi) --- .../mr/subgraphs/developing/developer-faq.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/developer-faq.mdx b/website/src/pages/mr/subgraphs/developing/developer-faq.mdx index 8578be282aad..4f3e183375b9 100644 --- a/website/src/pages/mr/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/mr/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ This page summarizes some of the most common questions for developers building o ## Subgraph Related -### 1. सबग्राफ म्हणजे काय? +### 1. What is a Subgraph? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. What is the first step to create a subgraph? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. Can I still create a subgraph if my smart contracts don't have events? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. मी माझ्या सबग्राफशी संबंधित गिटहब खाते बदलू शकतो का? +### 4. Can I change the GitHub account associated with my Subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. -### 5. 
How do I update a subgraph on mainnet? +### 5. How do I update a Subgraph on mainnet? -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -तुम्हाला सबग्राफ पुन्हा तैनात करावा लागेल, परंतु सबग्राफ आयडी (IPFS हॅश) बदलत नसल्यास, त्याला सुरुवातीपासून सिंक करण्याची गरज नाही. +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? Not currently, as mappings are written in AssemblyScript. @@ -45,15 +45,15 @@ One possible alternative solution to this is to store raw data in entities and p ### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? -सबग्राफमध्‍ये, इव्‍हेंट नेहमी ब्लॉकमध्‍ये दिसण्‍याच्‍या क्रमाने संसाधित केले जातात, ते एकाधिक कॉन्ट्रॅक्टमध्‍ये असले किंवा नसले तरीही. +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. How are templates different from data sources? -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? 
Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ When new dynamic data source are created, the handlers defined for dynamic data If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. Can I delete my subgraph? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## Network Related @@ -110,11 +110,11 @@ Yes. Sepolia supports block handlers, call handlers and event handlers. It shoul Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? होय! खालील आदेश वापरून पहा, "संस्था/सबग्राफनेम" च्या जागी त्याखालील संस्था प्रकाशित झाली आहे आणि तुमच्या सबग्राफचे नाव: @@ -132,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. 
## Miscellaneous From de9d5173ad54e8d0b2c01da6da8dd34c2fe64713 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:55 -0500 Subject: [PATCH 0888/1789] New translations developer-faq.mdx (Hindi) --- .../hi/subgraphs/developing/developer-faq.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/developer-faq.mdx b/website/src/pages/hi/subgraphs/developing/developer-faq.mdx index 6eeb3c64ff7f..154f01dfe721 100644 --- a/website/src/pages/hi/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/hi/subgraphs/developing/developer-faq.mdx @@ -7,37 +7,37 @@ sidebarTitle: FAQ ## सबग्रह संबंधित -### 1. सबग्राफ क्या है? +### 1. What is a Subgraph? -एक subgraph एक कस्टम API है जो ब्लॉकचेन डेटा पर आधारित है। subgraphs को GraphQL क्वेरी भाषा का उपयोग करके क्वेरी किया जाता है और इन्हें The Graph CLI का उपयोग करके Graph Node पर तैनात किया जाता है। एक बार तैनात और The Graph के विकेन्द्रीकृत नेटवर्क पर प्रकाशित होने के बाद, Indexers subgraphs को प्रोसेस करते हैं और उन्हें subgraph उपभोक्ताओं के लिए क्वेरी करने के लिए उपलब्ध कराते हैं। +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. -### 2. एक Subgraph बनाने का पहला कदम क्या है? +### 2. What is the first step to create a Subgraph? -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). -### 3. क्या मैं अभी भी एक subgraph बना सकता हूँ यदि मेरी स्मार्ट कॉन्ट्रैक्ट्स में कोई इवेंट्स नहीं हैं? +### 3. Can I still create a Subgraph if my smart contracts don't have events? -यह अत्यधिक अनुशंसित है कि आप अपने स्मार्ट अनुबंधों को इस तरह से संरचित करें कि उन डेटा के साथ घटनाएँ हों जिनमें आपकी रुचि है। अनुबंध की घटनाओं द्वारा संचालित 'event handlers' को Subgraph में ट्रिगर किया जाता है और यह उपयोगी डेटा प्राप्त करने का सबसे तेज़ तरीका है। +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. -अगर आप जिन अनुबंधों के साथ काम कर रहे हैं, उनमें घटनाएँ नहीं हैं, तो आपका subgraph कॉल और ब्लॉक हैंडलर्स का उपयोग कर सकता है ताकि इंडेक्सिंग को ट्रिगर किया जा सके। हालाँकि, यह अनुशंसित नहीं है, क्योंकि प्रदर्शन काफी धीमा होगा। +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. -### 4. क्या मैं अपने सबग्राफ से जुड़े GitHub खाते को बदल सकता हूँ? +### 4. Can I change the GitHub account associated with my Subgraph? -एक बार जब एक subgraph बनाया जाता है, तो संबंधित GitHub खाता नहीं बदला जा सकता है। कृपया अपने subgraph को बनाने से पहले इसे ध्यान से विचार करें। +No. Once a Subgraph is created, the associated GitHub account cannot be changed. 
Please make sure to carefully consider this before creating your Subgraph. -### 5. मैं मुख्य नेटवर्क पर एक subgraph को कैसे अपडेट करूँ? +### 5. How do I update a Subgraph on mainnet? -आप अपने subgraph का नया संस्करण Subgraph Studio में CLI का उपयोग करके डिप्लॉय कर सकते हैं। यह क्रिया आपके subgraph को निजी रखती है, लेकिन जब आप इससे खुश हों, तो आप Graph Explorer में इसे प्रकाशित कर सकते हैं। इससे आपके subgraph का एक नया संस्करण बनेगा जिस पर Curators सिग्नल करना शुरू कर सकते हैं। +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. -### 6. एक Subgraph को दूसरे खाते या एंडपॉइंट पर बिना पुनः तैनात किए डुप्लिकेट करना संभव है? +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? -आपको सबग्राफ को फिर से तैनात करना होगा, लेकिन अगर सबग्राफ आईडी (आईपीएफएस हैश) नहीं बदलता है, तो इसे शुरुआत से सिंक नहीं करना पड़ेगा। +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. -### 7. आप अपने subgraph mappings से एक contract function को कैसे कॉल करें या एक सार्वजनिक state variable तक कैसे पहुँचें? +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? AssemblyScript में वर्तमान में मैपिंग्स नहीं लिखी जा रही हैं। @@ -45,15 +45,15 @@ AssemblyScript में वर्तमान में मैपिंग् ### 9. कई कॉन्ट्रैक्ट सुनते समय, क्या घटनाओं को सुनने के लिए कॉन्ट्रैक्ट के क्रम का चयन करना संभव है? -एक सबग्राफ के भीतर, घटनाओं को हमेशा उसी क्रम में संसाधित किया जाता है जिस क्रम में वे ब्लॉक में दिखाई देते हैं, भले ही वह कई अनुबंधों में हो या नहीं। +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. ### 10. टेम्प्लेट्स और डेटा स्रोतों में क्या अंतर है? -Templates आपको डेटा स्रोतों को तेजी से बनाने की अनुमति देते हैं, जबकि आपका subgraph इंडेक्सिंग कर रहा है। आपका कॉन्ट्रैक्ट नए कॉन्ट्रैक्ट उत्पन्न कर सकता है जब लोग इसके साथ इंटरैक्ट करते हैं। चूंकि आप उन कॉन्ट्रैक्टों का आकार (ABI, इवेंट, आदि) पहले से जानते हैं, आप यह निर्धारित कर सकते हैं कि आप उन्हें एक टेम्पलेट में कैसे इंडेक्स करना चाहते हैं। जब वे उत्पन्न होते हैं, तो आपका subgraph कॉन्ट्रैक्ट पते को प्रदान करके एक डायनामिक डेटा स्रोत बनाएगा। +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? +### 11. 
Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. @@ -79,9 +79,9 @@ docker pull graphprotocol/graph-node:latest If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. -### 15. क्या मैं अपना subgraph हटा सकता हूँ? +### 15. Can I delete my Subgraph? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. ## नेटवर्क से संबंधित। @@ -110,11 +110,11 @@ Yes. Sepolia supports block handlers, call handlers and event handlers. It shoul Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 20. यहां कुछ सुझाव दिए गए हैं ताकि इंडेक्सिंग का प्रदर्शन बढ़ सके। मेरा subgraph बहुत लंबे समय तक सिंक होने में समय ले रहा है। +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) -### 21. क्या कोई तरीका है कि 'subgraph' को सीधे क्वेरी करके यह पता लगाया जा सके कि उसने कौन सा लेटेस्ट ब्लॉक नंबर इंडेक्स किया है? +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? हाँ! निम्न आदेश का प्रयास करें, "संगठन/सबग्राफनाम" को उस संगठन के साथ प्रतिस्थापित करें जिसके अंतर्गत वह प्रकाशित है और आपके सबग्राफ का नाम: @@ -132,11 +132,11 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. -## विविध +## विविध -### क्या Apollo Federation का उपयोग graph-node के ऊपर किया जा सकता है? +### क्या Apollo Federation का उपयोग graph-node के ऊपर किया जा सकता है? 
Federation अभी समर्थित नहीं है। फिलहाल, आप schema stitching का उपयोग कर सकते हैं, या तो क्लाइंट पर या एक प्रॉक्सी सेवा के माध्यम से। From 04cde1cd7d4260d2e633447bf006b9fd801e6256 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:56 -0500 Subject: [PATCH 0889/1789] New translations developer-faq.mdx (Swahili) --- .../sw/subgraphs/developing/developer-faq.mdx | 148 ++++++++++++++++++ 1 file changed, 148 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/developing/developer-faq.mdx diff --git a/website/src/pages/sw/subgraphs/developing/developer-faq.mdx b/website/src/pages/sw/subgraphs/developing/developer-faq.mdx new file mode 100644 index 000000000000..e45141294523 --- /dev/null +++ b/website/src/pages/sw/subgraphs/developing/developer-faq.mdx @@ -0,0 +1,148 @@ +--- +title: Developer FAQ +sidebarTitle: FAQ +--- + +This page summarizes some of the most common questions for developers building on The Graph. + +## Subgraph Related + +### 1. What is a Subgraph? + +A Subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process Subgraphs and make them available for Subgraph consumers to query. + +### 2. What is the first step to create a Subgraph? + +To successfully create a Subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). + +### 3. Can I still create a Subgraph if my smart contracts don't have events? + +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the Subgraph are triggered by contract events and are the fastest way to retrieve useful data. + +If the contracts you work with do not contain events, your Subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. + +### 4. Can I change the GitHub account associated with my Subgraph? + +No. Once a Subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your Subgraph. + +### 5. How do I update a Subgraph on mainnet? + +You can deploy a new version of your Subgraph to Subgraph Studio using the CLI. This action maintains your Subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your Subgraph that Curators can start signaling on. + +### 6. Is it possible to duplicate a Subgraph to another account or endpoint without redeploying? + +You have to redeploy the Subgraph, but if the Subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. + +### 7. How do I call a contract function or access a public state variable from my Subgraph mappings? + +Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). + +### 8. Can I import `ethers.js` or other JS libraries into my Subgraph mappings? + +Not currently, as mappings are written in AssemblyScript. + +One possible alternative solution to this is to store raw data in entities and perform logic that requires JS libraries on the client. + +### 9. 
When listening to multiple contracts, is it possible to select the contract order to listen to events? + +Within a Subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. + +### 10. How are templates different from data sources? + +Templates allow you to create data sources quickly, while your Subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your Subgraph will create a dynamic data source by supplying the contract address. + +Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). + +### 11. Is it possible to set up a Subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? + +Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. + +You can also use `graph add` command to add a new dataSource. + +### 12. In what order are the event, block, and call handlers triggered for a data source? + +Event and call handlers are first ordered by transaction index within the block. Event and call handlers within the same transaction are ordered using a convention: event handlers first then call handlers, each type respecting the order they are defined in the manifest. Block handlers are run after event and call handlers, in the order they are defined in the manifest. Also these ordering rules are subject to change. + +When new dynamic data source are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. + +### 13. How do I make sure I'm using the latest version of graph-node for my local deployments? + +You can run the following command: + +```sh +docker pull graphprotocol/graph-node:latest +``` + +> Note: docker / docker-compose will always use whatever graph-node version was pulled the first time you ran it, so make sure you're up to date with the latest version of graph-node. + +### 14. What is the recommended way to build "autogenerated" ids for an entity when handling events? + +If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. + +### 15. Can I delete my Subgraph? + +Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your Subgraph. + +## Network Related + +### 16. What networks are supported by The Graph? + +You can find the list of the supported networks [here](/supported-networks/). + +### 17. Is it possible to differentiate between networks (mainnet, Sepolia, local) within event handlers? + +Yes. You can do this by importing `graph-ts` as per the example below: + +```javascript +import { dataSource } from '@graphprotocol/graph-ts' + +dataSource.network() +dataSource.address() +``` + +### 18. Do you support block and call handlers on Sepolia? + +Yes. Sepolia supports block handlers, call handlers and event handlers. 
It should be noted that event handlers are far more performant than the other two handlers, and they are supported on every EVM-compatible network. + +## Indexing & Querying Related + +### 19. Is it possible to specify what block to start indexing on? + +Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) + +### 20. What are some tips to increase the performance of indexing? My Subgraph is taking a very long time to sync + +Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) + +### 21. Is there a way to query the Subgraph directly to determine the latest block number it has indexed? + +Yes! Try the following command, substituting "organization/subgraphName" with the organization under it is published and the name of your subgraph: + +```sh +curl -X POST -d '{ "query": "{indexingStatusForCurrentVersion(subgraphName: \"organization/subgraphName\") { chains { latestBlock { hash number }}}}"}' https://api.thegraph.com/index-node/graphql +``` + +### 22. Is there a limit to how many objects The Graph can return per query? + +By default, query responses are limited to 100 items per collection. If you want to receive more, you can go up to 1000 items per collection and beyond that, you can paginate with: + +```graphql +someCollection(first: 1000, skip: ) { ... } +``` + +### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? + +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and Subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. + +## Miscellaneous + +### 24. Is it possible to use Apollo Federation on top of graph-node? + +Federation is not supported yet. At the moment, you can use schema stitching, either on the client or via a proxy service. + +### 25. I want to contribute or add a GitHub issue. Where can I find the open source repositories? + +- [graph-node](https://github.com/graphprotocol/graph-node) +- [graph-tooling](https://github.com/graphprotocol/graph-tooling) +- [graph-docs](https://github.com/graphprotocol/docs) +- [graph-client](https://github.com/graphprotocol/graph-client) From 988b72ccbcfeff180a78ac4f29c212e5fbb489d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:57 -0500 Subject: [PATCH 0890/1789] New translations introduction.mdx (Romanian) --- .../ro/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/introduction.mdx b/website/src/pages/ro/subgraphs/developing/introduction.mdx index 615b6cec4c9c..06bc2b76104d 100644 --- a/website/src/pages/ro/subgraphs/developing/introduction.mdx +++ b/website/src/pages/ro/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ As a developer, you need data to build and power your dapp. 
Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### What is GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### What are subgraphs? +### What are Subgraphs? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From 46d179309864c709e19687b0a2737f50f5dd2060 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:58 -0500 Subject: [PATCH 0891/1789] New translations introduction.mdx (French) --- .../fr/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/introduction.mdx b/website/src/pages/fr/subgraphs/developing/introduction.mdx index 7956855d9d83..b6a81d61f1d8 100644 --- a/website/src/pages/fr/subgraphs/developing/introduction.mdx +++ b/website/src/pages/fr/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ En tant que développeur, vous avez besoin de données pour construire et alimen Sur The Graph, vous pouvez : -1. Créer, déployer et publier des subgraphs sur The Graph à l'aide de Graph CLI et de [Subgraph Studio](https://thegraph.com/studio/). -2. Utiliser GraphQL pour interroger des subgraphs existants. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### Qu'est-ce que GraphQL ? -- [GraphQL](https://graphql.org/learn/) est un langage de requête pour les API et un moteur d'exécution permettant d'exécuter ces requêtes avec vos données existantes. The Graph utilise GraphQL pour interroger les subgraphs. 
+- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Actions des Développeurs -- Interrogez les subgraphs construits par d'autres développeurs dans [The Graph Network](https://thegraph.com/explorer) et intégrez-les dans vos propres dapps. -- Créer des subgraphs personnalisés pour répondre à des besoins de données spécifiques, permettant une meilleure évolutivité et flexibilité pour les autres développeurs. -- Déployer, publier et signaler vos subgraphs au sein de The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### Que sont les subgraphs ? +### What are Subgraphs? -Un subgraph est une API personnalisée construite sur des données blockchain. Il extrait des données d'une blockchain, les traite et les stocke afin qu'elles puissent être facilement interrogées via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Consultez la documentation sur les [subgraphs](/subgraphs/developing/subgraphs/) pour en savoir plus. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From 6246206f606529e619f0f3107ce3c04d1a259976 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:23:59 -0500 Subject: [PATCH 0892/1789] New translations introduction.mdx (Spanish) --- .../es/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/introduction.mdx b/website/src/pages/es/subgraphs/developing/introduction.mdx index 7d4760cb4c35..facd793fde33 100644 --- a/website/src/pages/es/subgraphs/developing/introduction.mdx +++ b/website/src/pages/es/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### What is GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. 
+- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### What are subgraphs? +### What are Subgraphs? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From 01c7a94b622c48bb219e33c23d1e5021e4c98f8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:00 -0500 Subject: [PATCH 0893/1789] New translations introduction.mdx (Arabic) --- .../ar/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/introduction.mdx b/website/src/pages/ar/subgraphs/developing/introduction.mdx index d3b71aaab704..946e62affbe7 100644 --- a/website/src/pages/ar/subgraphs/developing/introduction.mdx +++ b/website/src/pages/ar/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### What is GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### What are subgraphs? +### What are Subgraphs? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. 
-Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From 89d4dfcc4b1c1fdb227b34e31807ed1960e149c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:01 -0500 Subject: [PATCH 0894/1789] New translations introduction.mdx (Czech) --- .../cs/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/introduction.mdx b/website/src/pages/cs/subgraphs/developing/introduction.mdx index 110d7639aded..b040c749c6ca 100644 --- a/website/src/pages/cs/subgraphs/developing/introduction.mdx +++ b/website/src/pages/cs/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### What is GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### What are subgraphs? +### What are Subgraphs? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. 
From 6c74334925640401c1122883d0c6bbe215899a8c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:02 -0500 Subject: [PATCH 0895/1789] New translations introduction.mdx (German) --- .../de/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/introduction.mdx b/website/src/pages/de/subgraphs/developing/introduction.mdx index fd2872880ce0..42fe1833273b 100644 --- a/website/src/pages/de/subgraphs/developing/introduction.mdx +++ b/website/src/pages/de/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### What is GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### What are subgraphs? +### What are Subgraphs? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From 95a1ac114919f1a68deee2fc40b51f76bdbd3768 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:03 -0500 Subject: [PATCH 0896/1789] New translations introduction.mdx (Italian) --- .../it/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/introduction.mdx b/website/src/pages/it/subgraphs/developing/introduction.mdx index 53060bdd4de4..70610ef84065 100644 --- a/website/src/pages/it/subgraphs/developing/introduction.mdx +++ b/website/src/pages/it/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. 
Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### What is GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### What are subgraphs? +### What are Subgraphs? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From 45f8d9a2b68d348b152e94a704f8598d2499b5f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:04 -0500 Subject: [PATCH 0897/1789] New translations introduction.mdx (Japanese) --- .../ja/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/introduction.mdx b/website/src/pages/ja/subgraphs/developing/introduction.mdx index 982e426ba4aa..e7d2fb8eff33 100644 --- a/website/src/pages/ja/subgraphs/developing/introduction.mdx +++ b/website/src/pages/ja/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### What is GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. 
### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### What are subgraphs? +### What are Subgraphs? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From 782696a9e7210d7a8055180b5934a48ae5574636 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:05 -0500 Subject: [PATCH 0898/1789] New translations introduction.mdx (Korean) --- .../ko/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/introduction.mdx b/website/src/pages/ko/subgraphs/developing/introduction.mdx index 615b6cec4c9c..06bc2b76104d 100644 --- a/website/src/pages/ko/subgraphs/developing/introduction.mdx +++ b/website/src/pages/ko/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### What is GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### What are subgraphs? +### What are Subgraphs? 
-A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From 95cabc3e52d5197924a81950bf2fc15e2ff0f833 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:06 -0500 Subject: [PATCH 0899/1789] New translations introduction.mdx (Dutch) --- .../nl/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/introduction.mdx b/website/src/pages/nl/subgraphs/developing/introduction.mdx index 615b6cec4c9c..06bc2b76104d 100644 --- a/website/src/pages/nl/subgraphs/developing/introduction.mdx +++ b/website/src/pages/nl/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### What is GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### What are subgraphs? +### What are Subgraphs? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. 
From 8e49b4c9a050c40ee880d7e5c7f0f8c78cbdae86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:07 -0500 Subject: [PATCH 0900/1789] New translations introduction.mdx (Polish) --- .../pl/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/introduction.mdx b/website/src/pages/pl/subgraphs/developing/introduction.mdx index 509b25654e82..92b39857a7f1 100644 --- a/website/src/pages/pl/subgraphs/developing/introduction.mdx +++ b/website/src/pages/pl/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### What is GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### What are subgraphs? +### What are Subgraphs? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From b1095fe22b5abb8795c2c18da68dab5807946d39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:09 -0500 Subject: [PATCH 0901/1789] New translations introduction.mdx (Portuguese) --- .../pt/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/introduction.mdx b/website/src/pages/pt/subgraphs/developing/introduction.mdx index e550867e2244..4ee18b88b45c 100644 --- a/website/src/pages/pt/subgraphs/developing/introduction.mdx +++ b/website/src/pages/pt/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. 
Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### O Que é a GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### What are subgraphs? +### What are Subgraphs? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From d98e67a1990c9227050ae94ededcda2ca0e0bc71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:10 -0500 Subject: [PATCH 0902/1789] New translations introduction.mdx (Russian) --- .../ru/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/introduction.mdx b/website/src/pages/ru/subgraphs/developing/introduction.mdx index d5b1df06feae..6a2a8803f326 100644 --- a/website/src/pages/ru/subgraphs/developing/introduction.mdx +++ b/website/src/pages/ru/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start На The Graph Вы можете: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Использовать GraphQL для запроса существующих субграфов. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### Что такое GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. 
### Действия разработчика -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Создавайте собственные субграфы для удовлетворения конкретных потребностей в данных, обеспечивая улучшенную масштабируемость и гибкость для других разработчиков. -- Развертывайте, публикуйте и сигнализируйте о своих субграфах в The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### Что такое субграфы? +### What are Subgraphs? -Субграф — это пользовательский API, созданный на основе данных блокчейна. Он извлекает данные из блокчейна, обрабатывает их и сохраняет так, чтобы их можно было легко запросить через GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From bde1cb591a0381382483f915d7b9c3df77ca07a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:11 -0500 Subject: [PATCH 0903/1789] New translations introduction.mdx (Swedish) --- .../sv/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/introduction.mdx b/website/src/pages/sv/subgraphs/developing/introduction.mdx index bf5f1bb0f311..c4e9fbd9c78a 100644 --- a/website/src/pages/sv/subgraphs/developing/introduction.mdx +++ b/website/src/pages/sv/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### What is GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. 
-### What are subgraphs? +### What are Subgraphs? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From 14ceabc13753638d90d8858f621ede89dfcc7943 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:12 -0500 Subject: [PATCH 0904/1789] New translations introduction.mdx (Turkish) --- .../tr/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/introduction.mdx b/website/src/pages/tr/subgraphs/developing/introduction.mdx index 6a76c8957cee..95e5f12f197d 100644 --- a/website/src/pages/tr/subgraphs/developing/introduction.mdx +++ b/website/src/pages/tr/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### What is GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### What are subgraphs? +### What are Subgraphs? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. 
From 6ab7e06ed0e6b6f70aebcc3949e2cfac0d0bf108 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:14 -0500 Subject: [PATCH 0905/1789] New translations introduction.mdx (Ukrainian) --- .../uk/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/introduction.mdx b/website/src/pages/uk/subgraphs/developing/introduction.mdx index 615b6cec4c9c..06bc2b76104d 100644 --- a/website/src/pages/uk/subgraphs/developing/introduction.mdx +++ b/website/src/pages/uk/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### What is GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### What are subgraphs? +### What are Subgraphs? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From 5950652f1b54da01396d8a411fdce71667511434 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:15 -0500 Subject: [PATCH 0906/1789] New translations introduction.mdx (Chinese Simplified) --- .../zh/subgraphs/developing/introduction.mdx | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/introduction.mdx b/website/src/pages/zh/subgraphs/developing/introduction.mdx index a34bc90855b3..593c8c391cc2 100644 --- a/website/src/pages/zh/subgraphs/developing/introduction.mdx +++ b/website/src/pages/zh/subgraphs/developing/introduction.mdx @@ -5,27 +5,27 @@ sidebarTitle: 介绍 To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). 
-## 概述 +## Overview As a developer, you need data to build and power your dapp. Querying and indexing blockchain data is challenging, but The Graph provides a solution to this issue. On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### What is GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### What are subgraphs? +### What are Subgraphs? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From 1168b1d05212731505199e4448dec47deae2610f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:15 -0500 Subject: [PATCH 0907/1789] New translations introduction.mdx (Urdu (Pakistan)) --- .../ur/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/introduction.mdx b/website/src/pages/ur/subgraphs/developing/introduction.mdx index e7ab36598ccb..aceaf166c362 100644 --- a/website/src/pages/ur/subgraphs/developing/introduction.mdx +++ b/website/src/pages/ur/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### What is GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. 
+- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### What are subgraphs? +### What are Subgraphs? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From 4a88cb86a533573c12cdb55990375df237ec6217 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:16 -0500 Subject: [PATCH 0908/1789] New translations introduction.mdx (Vietnamese) --- .../vi/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/introduction.mdx b/website/src/pages/vi/subgraphs/developing/introduction.mdx index ea7cc276b1d2..7e1039b57a36 100644 --- a/website/src/pages/vi/subgraphs/developing/introduction.mdx +++ b/website/src/pages/vi/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### What is GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. 
+- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### What are subgraphs? +### What are Subgraphs? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From 2045d1707c3370b1367b471e007b486f2c23d975 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:17 -0500 Subject: [PATCH 0909/1789] New translations introduction.mdx (Marathi) --- .../mr/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/introduction.mdx b/website/src/pages/mr/subgraphs/developing/introduction.mdx index 3123dd66f2a7..9b6155152843 100644 --- a/website/src/pages/mr/subgraphs/developing/introduction.mdx +++ b/website/src/pages/mr/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### What is GraphQL? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### Developer Actions -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### What are subgraphs? +### What are Subgraphs? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. 
From d4960923e5e1ce29c955617c1b90dd53576cf904 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:18 -0500 Subject: [PATCH 0910/1789] New translations introduction.mdx (Hindi) --- .../hi/subgraphs/developing/introduction.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/introduction.mdx b/website/src/pages/hi/subgraphs/developing/introduction.mdx index 12e2aba18447..e2f3b086bd88 100644 --- a/website/src/pages/hi/subgraphs/developing/introduction.mdx +++ b/website/src/pages/hi/subgraphs/developing/introduction.mdx @@ -11,21 +11,21 @@ To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start The Graph पर, आप: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. मौजूदा subgraphs को क्वेरी करने के लिए GraphQL का उपयोग करें। +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. ### GraphQL क्या है? -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. ### डेवलपर क्रियाएँ -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- विशिष्ट डेटा आवश्यकताओं को पूरा करने के लिए कस्टम सबग्राफ़ बनाएं, जिससे अन्य डेवलपर्स के लिए स्केलेबिलिटी और लचीलापन में सुधार हो सके। -- अपने subgraphs को The Graph Network में तैनात करें, प्रकाशित करें और संकेत दें। +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. -### सबग्राफ़ क्या हैं? +### What are Subgraphs? -एक Subgraph एक कस्टम API है जो ब्लॉकचेन डेटा पर आधारित होता है। यह ब्लॉकचेन से डेटा निकालता है, उसे प्रोसेस करता है, और उसे इस तरह से संग्रहित करता है कि उसे GraphQL के माध्यम से आसानी से क्वेरी किया जा सके। +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. 
From 74b1d1a678d5577855d8813d8e13c6e41cd2bd54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:19 -0500 Subject: [PATCH 0911/1789] New translations introduction.mdx (Swahili) --- .../sw/subgraphs/developing/introduction.mdx | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/developing/introduction.mdx diff --git a/website/src/pages/sw/subgraphs/developing/introduction.mdx b/website/src/pages/sw/subgraphs/developing/introduction.mdx new file mode 100644 index 000000000000..06bc2b76104d --- /dev/null +++ b/website/src/pages/sw/subgraphs/developing/introduction.mdx @@ -0,0 +1,31 @@ +--- +title: Introduction to Subgraph Development +sidebarTitle: Introduction +--- + +To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). + +## Overview + +As a developer, you need data to build and power your dapp. Querying and indexing blockchain data is challenging, but The Graph provides a solution to this issue. + +On The Graph, you can: + +1. Create, deploy, and publish Subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing Subgraphs. + +### What is GraphQL? + +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query Subgraphs. + +### Developer Actions + +- Query Subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. +- Create custom Subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. +- Deploy, publish and signal your Subgraphs within The Graph Network. + +### What are Subgraphs? + +A Subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. + +Check out the documentation on [Subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From f1e520ef4d00ce2dc6d742a51f9ec665c022a5de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:20 -0500 Subject: [PATCH 0912/1789] New translations deleting-a-subgraph.mdx (Romanian) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/ro/subgraphs/developing/managing/deleting-a-subgraph.mdx index 5a4ac15e07fd..b8c2330ca49d 100644 --- a/website/src/pages/ro/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/ro/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Step-by-Step -1. Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. 
Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From d963e7859e2d3410f1c8948e6c61d3a3abaa4691 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:21 -0500 Subject: [PATCH 0913/1789] New translations deleting-a-subgraph.mdx (French) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/fr/subgraphs/developing/managing/deleting-a-subgraph.mdx index c74be2b234dd..4d9ff9807b70 100644 --- a/website/src/pages/fr/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/fr/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Suppression d'un Subgraph --- -Supprimez votre subgraph en utilisant [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> En supprimant votre subgraph, vous supprimez toutes les versions publiées de The Graph Network, mais il restera visible sur Graph Explorer et Subgraph Studio pour les utilisateurs qui l'ont signalé. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Étape par Étape -1. Visitez la page du subgraph sur [Subgraph Studio](https://thegraph.com/studio/). +1. 
Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Cliquez sur les trois points à droite du bouton "publier". -3. Cliquez sur l'option "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. En fonction de l'état du subgraph, différentes options vous seront proposées. +4. Depending on the Subgraph's status, you will be prompted with various options. - - Si le subgraph n'est pas publié, il suffit de cliquer sur “delete“ et de confirmer. - - Si le subgraph est publié, vous devrez le confirmer sur votre portefeuille avant de pouvoir le supprimer de Studio. Si un subgraph est publié sur plusieurs réseaux, tels que testnet et mainnet, des étapes supplémentaires peuvent être nécessaires. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> Si le propriétaire du subgraph l'a signalé, les GRT signalés seront renvoyés au propriétaire. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Rappels importants -- Une fois que vous avez supprimé un subgraph, il **n'apparaîtra plus** sur la page d'accueil de Graph Explorer. Toutefois, les utilisateurs qui ont signalé sur ce subgraph pourront toujours le voir sur leurs pages de profil et supprimer leur signal. -- Les curateurs ne seront plus en mesure de signaler le subgraph. -- Les Curateurs qui ont déjà signalé sur le subgraph peuvent retirer leur signal à un prix moyen par action. -- Les subgraphs supprimés afficheront un message d'erreur. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From 2488690ce6e3e6b4bf11039b633af0e2d66a1220 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:22 -0500 Subject: [PATCH 0914/1789] New translations deleting-a-subgraph.mdx (Spanish) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/es/subgraphs/developing/managing/deleting-a-subgraph.mdx index 972a4f552c25..b8c2330ca49d 100644 --- a/website/src/pages/es/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/es/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Step-by-Step -1. 
Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- Los Curadores ya no podrán señalar en el subgrafo. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From 6dda46d0098e06b58eadca50c3ef85a1955ed1fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:23 -0500 Subject: [PATCH 0915/1789] New translations deleting-a-subgraph.mdx (Arabic) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/ar/subgraphs/developing/managing/deleting-a-subgraph.mdx index 5a4ac15e07fd..b8c2330ca49d 100644 --- a/website/src/pages/ar/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/ar/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Step-by-Step -1. 
Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From 249c8a0cdaa24429ac2bd5bd7c9f62e9ca6e45b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:24 -0500 Subject: [PATCH 0916/1789] New translations deleting-a-subgraph.mdx (Czech) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/cs/subgraphs/developing/managing/deleting-a-subgraph.mdx index 77896e36a45d..b8c2330ca49d 100644 --- a/website/src/pages/cs/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/cs/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Step-by-Step -1. 
Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- Kurátoři již nebudou moci signalizovat na podgrafu. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From bf9875799edd3c035bd92df455ce244658c22c16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:25 -0500 Subject: [PATCH 0917/1789] New translations deleting-a-subgraph.mdx (German) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/de/subgraphs/developing/managing/deleting-a-subgraph.mdx index 91c22f7c44ba..e01d84c31aee 100644 --- a/website/src/pages/de/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/de/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Schritt für Schritt -1. 
Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From 955a4993db1df341b684366fc11d7b8b20362066 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:27 -0500 Subject: [PATCH 0918/1789] New translations deleting-a-subgraph.mdx (Italian) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/it/subgraphs/developing/managing/deleting-a-subgraph.mdx index 90a2eb4b7d33..b8c2330ca49d 100644 --- a/website/src/pages/it/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/it/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Step-by-Step -1. 
Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- I Curator non potranno più segnalare il subgraph. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From fabfcc73bd8c8fe0b4ae1e63303451d844025336 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:28 -0500 Subject: [PATCH 0919/1789] New translations deleting-a-subgraph.mdx (Japanese) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/ja/subgraphs/developing/managing/deleting-a-subgraph.mdx index 6a9aef388d02..b8c2330ca49d 100644 --- a/website/src/pages/ja/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/ja/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Step-by-Step -1. 
Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- キュレーターは、サブグラフにシグナルを送ることができなくなります。 -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From 0c5d30967c227d974df49a03b549c32e42a4e011 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:29 -0500 Subject: [PATCH 0920/1789] New translations deleting-a-subgraph.mdx (Korean) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/ko/subgraphs/developing/managing/deleting-a-subgraph.mdx index 5a4ac15e07fd..b8c2330ca49d 100644 --- a/website/src/pages/ko/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/ko/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Step-by-Step -1. 
Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From 406e949a43b85753f3750a54292794e890fde365 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:30 -0500 Subject: [PATCH 0921/1789] New translations deleting-a-subgraph.mdx (Dutch) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/nl/subgraphs/developing/managing/deleting-a-subgraph.mdx index 5a4ac15e07fd..b8c2330ca49d 100644 --- a/website/src/pages/nl/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/nl/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Step-by-Step -1. 
Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From a53ae67951c662e132ebfd1d87d1b09ff0823725 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:31 -0500 Subject: [PATCH 0922/1789] New translations deleting-a-subgraph.mdx (Polish) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/pl/subgraphs/developing/managing/deleting-a-subgraph.mdx index 5a4ac15e07fd..b8c2330ca49d 100644 --- a/website/src/pages/pl/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/pl/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Step-by-Step -1. 
Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From 2ff33067ead2671170bd1b85e4e49e5c75d6326c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:32 -0500 Subject: [PATCH 0923/1789] New translations deleting-a-subgraph.mdx (Portuguese) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/pt/subgraphs/developing/managing/deleting-a-subgraph.mdx index d5305fe2cfbe..a792aa2669bf 100644 --- a/website/src/pages/pt/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/pt/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Passo a Passo -1. 
Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- Os curadores não poderão mais sinalizar no subgraph depreciado. -- Curadores que já sinalizaram no subgraph poderão retirar a sua sinalização a um preço de ação normal. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From 2f50f9eefbc053f11b3b2ae2537a20237c3e91fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:33 -0500 Subject: [PATCH 0924/1789] New translations deleting-a-subgraph.mdx (Russian) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/ru/subgraphs/developing/managing/deleting-a-subgraph.mdx index 5787620c079a..7d5a170783e0 100644 --- a/website/src/pages/ru/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/ru/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Пошаговое руководство -1. 
Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- Кураторы больше не смогут сигналить на сабграф. -- Кураторы, уже подавшие сигнал на субграф, могут отозвать свой сигнал по средней цене доли. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From d6ee0a51f8da62e54c9ea05a711d97d5c919c8c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:34 -0500 Subject: [PATCH 0925/1789] New translations deleting-a-subgraph.mdx (Swedish) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/sv/subgraphs/developing/managing/deleting-a-subgraph.mdx index ae778febe161..b8c2330ca49d 100644 --- a/website/src/pages/sv/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/sv/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Step-by-Step -1. 
Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- Kuratorer kommer inte längre kunna signalera på subgrafet. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From 0d856fb498b871a24381cb54366c7c6169b264a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:35 -0500 Subject: [PATCH 0926/1789] New translations deleting-a-subgraph.mdx (Turkish) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/tr/subgraphs/developing/managing/deleting-a-subgraph.mdx index e4564fc247f2..23574d11eff3 100644 --- a/website/src/pages/tr/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/tr/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Adım Adım -1. 
Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- Curators will not be able to signal on the subgraph anymore. -- Subgraph'e halihazırda sinyal vermiş küratörler, sinyallerini ortalama hisse fiyatından geri çekebilir. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From 7c215f728d55da7b2a03595241e85974fd21f8ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:36 -0500 Subject: [PATCH 0927/1789] New translations deleting-a-subgraph.mdx (Ukrainian) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/uk/subgraphs/developing/managing/deleting-a-subgraph.mdx index 5a4ac15e07fd..b8c2330ca49d 100644 --- a/website/src/pages/uk/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/uk/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Step-by-Step -1. 
Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From 4cee1f60d7b9c3f6753f8e30f41f0b97bd1c6134 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:37 -0500 Subject: [PATCH 0928/1789] New translations deleting-a-subgraph.mdx (Chinese Simplified) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/zh/subgraphs/developing/managing/deleting-a-subgraph.mdx index dff170e3730f..b8c2330ca49d 100644 --- a/website/src/pages/zh/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/zh/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Step-by-Step -1. 
Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- 策展人将无法再对该子图发出信号。 -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From 98e9ffd2647a511f4e970f0fd6aaf130bccc538c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:38 -0500 Subject: [PATCH 0929/1789] New translations deleting-a-subgraph.mdx (Urdu (Pakistan)) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/ur/subgraphs/developing/managing/deleting-a-subgraph.mdx index f078c166db88..b8c2330ca49d 100644 --- a/website/src/pages/ur/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/ur/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Step-by-Step -1. Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. 
Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- کیوریٹرز اب سب گراف پر سگنل نہیں دے سکیں گے. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From c7eded1a8fb80ee4735e736f13093c03606790dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:39 -0500 Subject: [PATCH 0930/1789] New translations deleting-a-subgraph.mdx (Vietnamese) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/vi/subgraphs/developing/managing/deleting-a-subgraph.mdx index 5a4ac15e07fd..b8c2330ca49d 100644 --- a/website/src/pages/vi/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/vi/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Step-by-Step -1. Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. 
Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From 96f2017ce5a8a4afa7dc1e5b1260c03993a20020 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:40 -0500 Subject: [PATCH 0931/1789] New translations deleting-a-subgraph.mdx (Marathi) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/mr/subgraphs/developing/managing/deleting-a-subgraph.mdx index cabf1261970a..b8c2330ca49d 100644 --- a/website/src/pages/mr/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/mr/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## Step-by-Step -1. Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. 
Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- क्युरेटर यापुढे सबग्राफवर सिग्नल करू शकणार नाहीत. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From 62b46d965a2dd1d89b9df28aa1fb3b506b935e1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:41 -0500 Subject: [PATCH 0932/1789] New translations deleting-a-subgraph.mdx (Hindi) --- .../managing/deleting-a-subgraph.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/hi/subgraphs/developing/managing/deleting-a-subgraph.mdx index e0889b86b0ab..02fdc71480ef 100644 --- a/website/src/pages/hi/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/hi/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -2,30 +2,30 @@ title: Deleting a Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. ## चरण-दर-चरण -1. Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. 
Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). 2. Click on the three-dots to the right of the "publish" button. -3. Click on the option to "delete this subgraph": +3. Click on the option to "delete this Subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. Depending on the Subgraph's status, you will be prompted with various options. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. ### Important Reminders -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- क्यूरेटर अब सबग्राफ पर संकेत नहीं दे पाएंगे। -- Subgraph पर पहले से संकेत कर चुके Curators औसत शेयर मूल्य पर अपना संकेत वापस ले सकते हैं। -- Deleted subgraphs will show an error message. +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From 9e0acd9e173827de44c7888971602a85ac7c33dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:42 -0500 Subject: [PATCH 0933/1789] New translations deleting-a-subgraph.mdx (Swahili) --- .../managing/deleting-a-subgraph.mdx | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/developing/managing/deleting-a-subgraph.mdx diff --git a/website/src/pages/sw/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/sw/subgraphs/developing/managing/deleting-a-subgraph.mdx new file mode 100644 index 000000000000..b8c2330ca49d --- /dev/null +++ b/website/src/pages/sw/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -0,0 +1,31 @@ +--- +title: Deleting a Subgraph +--- + +Delete your Subgraph using [Subgraph Studio](https://thegraph.com/studio/). + +> Deleting your Subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. + +## Step-by-Step + +1. Visit the Subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). + +2. Click on the three-dots to the right of the "publish" button. + +3. Click on the option to "delete this Subgraph": + + ![Delete-subgraph](/img/Delete-subgraph.png) + +4. Depending on the Subgraph's status, you will be prompted with various options. 
+ + - If the Subgraph is not published, simply click “delete” and confirm. + - If the Subgraph is published, you will need to confirm on your wallet before the Subgraph can be deleted from Studio. If a Subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + +> If the owner of the Subgraph has signal on it, the signaled GRT will be returned to the owner. + +### Important Reminders + +- Once you delete a Subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Curators will not be able to signal on the Subgraph anymore. +- Curators that already signaled on the Subgraph can withdraw their signal at an average share price. +- Deleted Subgraphs will show an error message. From 96a2c57498c6130b020b40ca6b297dd061d7734c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:43 -0500 Subject: [PATCH 0934/1789] New translations publishing-a-subgraph.mdx (Romanian) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/ro/subgraphs/developing/publishing/publishing-a-subgraph.mdx index dca943ad3152..2bc0ec5f514c 100644 --- a/website/src/pages/ro/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/ro/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: Publishing a Subgraph to the Decentralized Network +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -When you publish a subgraph to the decentralized network, you make it available for: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -All published versions of an existing subgraph can: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. -### Updating metadata for a published subgraph +### Updating metadata for a published Subgraph -- After publishing your subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. 
+- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Once you’ve saved your changes and published the updates, they will appear in Graph Explorer. - It's important to note that this process will not create a new version since your deployment has not changed. ## Publishing from the CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Open the `graph-cli`. 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized subgraph to a network of your choice. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Customizing your deployment -You can upload your subgraph build to a specific IPFS node and further customize your deployment with the following flags: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Adding signal to your subgraph +## Adding signal to your Subgraph -Developers can add GRT signal to their subgraphs to incentivize Indexers to query the subgraph. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- If a subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Adding signal to a subgraph which is not eligible for rewards will not attract additional Indexers. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> If your subgraph is eligible for rewards, it is recommended that you curate your own subgraph with at least 3,000 GRT in order to attract additional indexers to index your subgraph. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. 
This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio enables you to add signal to your subgraph by adding GRT to your subgraph's curation pool in the same transaction it is published. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Alternatively, you can add GRT signal to a published subgraph from Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. ![Signal from Explorer](/img/signal-from-explorer.png) From e1aded0b1f379afc49726fd1f0d2d1736ceeedab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:44 -0500 Subject: [PATCH 0935/1789] New translations publishing-a-subgraph.mdx (French) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/fr/subgraphs/developing/publishing/publishing-a-subgraph.mdx index 19a14a1b0eb2..3821e83ef1f8 100644 --- a/website/src/pages/fr/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/fr/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: Publication d'un subgraph sur le réseau décentralisé +sidebarTitle: Publishing to the Decentralized Network --- -Une fois que vous avez [déployé votre sous-graphe dans Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) et qu'il est prêt à être mis en production, vous pouvez le publier sur le réseau décentralisé. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -Lorsque vous publiez un subgraph sur le réseau décentralisé, vous le rendez disponible pour : +When you publish a Subgraph to the decentralized network, you make it available for: - [Curateurs](/resources/roles/curating/) pour commencer la curation. - [Indexeurs](/indexing/overview/) pour commencer à l'indexer. @@ -17,33 +18,33 @@ Consultez la liste des [réseaux pris en charge](/supported-networks/). 1. Accédez au tableau de bord de [Subgraph Studio](https://thegraph.com/studio/) 2. Cliquez sur le bouton **Publish** -3. 
Votre subgraph est désormais visible dans [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -Toutes les versions publiées d'un subgraph existant peuvent : +All published versions of an existing Subgraph can: - Être publié sur Arbitrum One. [En savoir plus sur The Graph Network sur Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Indexer les données sur n'importe lequel des [réseaux pris en charge](/supported-networks/), quel que soit le réseau sur lequel le subgraph a été publié. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. -### Mise à jour des métadonnées d'un subgraph publié +### Updating metadata for a published Subgraph -- Après avoir publié votre subgraph sur le réseau décentralisé, vous pouvez mettre à jour les métadonnées à tout moment dans Subgraph Studio. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Une fois que vous avez enregistré vos modifications et publié les mises à jour, elles apparaîtront dans Graph Explorer. - Il est important de noter que ce processus ne créera pas une nouvelle version puisque votre déploiement n'a pas changé. ## Publication à partir de la CLI -Depuis la version 0.73.0, vous pouvez également publier votre subgraph avec [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Ouvrez le `graph-cli`. 2. Utilisez les commandes suivantes : `graph codegen && graph build` puis `graph publish`. -3. Une fenêtre s'ouvrira, vous permettant de connecter votre portefeuille, d'ajouter des métadonnées et de déployer votre subgraph finalisé sur le réseau de votre choix. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Personnalisation de votre déploiement -Vous pouvez uploader votre build de subgraph sur un nœud IPFS spécifique et personnaliser davantage votre déploiement avec les options suivantes : +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` UTILISATION @@ -61,33 +62,33 @@ FLAGS ``` -## Ajout de signal à votre subgraph +## Adding signal to your Subgraph -Les développeurs peuvent ajouter des signaux GRT à leurs subgraphs pour inciter les Indexeurs à interroger le subgraph. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- Si un subgraph est éligible aux récompenses d'indexation, les Indexeurs qui fournissent une "preuve d'indexation" recevront une récompense en GRT, basée sur la quantité de GRT signalée. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- Vous pouvez vérifier l'éligibilité de la récompense d'indexation en fonction de l'utilisation des caractéristiques du subgraph [ici](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). 
- Les réseaux spécifiques pris en charge peuvent être vérifiés [ici](/supported-networks/). -> Ajouter un signal à un subgraph non éligible aux récompenses n'attirera pas d'Indexeurs supplémentaires. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> Si votre subgraph est éligible aux récompenses, il est recommandé de curer votre propre subgraph avec au moins 3 000 GRT afin d'attirer des indexeurs supplémentaires pour indexer votre subgraph. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -Le [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) assure l'indexation de tous les subgraphs. Cependant, le fait de signaler un GRT sur un subgraph particulier attirera plus d'Indexeurs vers celui-ci. Cette incitation à la création d'Indexeurs supplémentaires par le biais de la curation vise à améliorer la qualité de service pour les requêtes en réduisant la latence et en améliorant la disponibilité du réseau. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -Lors du signalement, les Curateurs peuvent décider de signaler une version spécifique du subgraph ou de signaler en utilisant l'auto-migration. S'ils signalent en utilisant l'auto-migration, les parts d'un Curateur seront toujours mises à jour vers la dernière version publiée par le développeur. S'ils décident de signaler une version spécifique, les parts resteront toujours sur cette version spécifique. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Les Indexeurs peuvent trouver des subgraphs à indexer en fonction des signaux de curation qu'ils voient dans Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio vous permet d'ajouter des signaux à votre subgraph en ajoutant des GRT au pool de curation de votre subgraph dans la même transaction où il est publié. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Alternativement, vous pouvez ajouter des signaux GRT à un subgraph publié à partir de Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. 
![Signal provenant de l'Explorer](/img/signal-from-explorer.png) From 105988b58f3ae9d5b5389a07fa08d47c762409b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:45 -0500 Subject: [PATCH 0936/1789] New translations publishing-a-subgraph.mdx (Spanish) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/es/subgraphs/developing/publishing/publishing-a-subgraph.mdx index d37d8bf2ed62..67c076d0a156 100644 --- a/website/src/pages/es/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/es/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: Publicación de un subgrafo en la Red Descentralizada +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -When you publish a subgraph to the decentralized network, you make it available for: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -All published versions of an existing subgraph can: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. -### Actualización de los metadatos de un subgrafo publicado +### Updating metadata for a published Subgraph -- After publishing your subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Once you’ve saved your changes and published the updates, they will appear in Graph Explorer. - It's important to note that this process will not create a new version since your deployment has not changed. ## Publishing from the CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Open the `graph-cli`. 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. 
A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized subgraph to a network of your choice. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Customizing your deployment -You can upload your subgraph build to a specific IPFS node and further customize your deployment with the following flags: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Adding signal to your subgraph +## Adding signal to your Subgraph -Developers can add GRT signal to their subgraphs to incentivize Indexers to query the subgraph. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- If a subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Adding signal to a subgraph which is not eligible for rewards will not attract additional Indexers. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> If your subgraph is eligible for rewards, it is recommended that you curate your own subgraph with at least 3,000 GRT in order to attract additional indexers to index your subgraph. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. 
If they decide to signal on a specific version instead, shares will always stay on this specific version. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio enables you to add signal to your subgraph by adding GRT to your subgraph's curation pool in the same transaction it is published. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Alternatively, you can add GRT signal to a published subgraph from Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. ![Signal from Explorer](/img/signal-from-explorer.png) From e00242047e707595853272859c894dda5ae39813 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:46 -0500 Subject: [PATCH 0937/1789] New translations publishing-a-subgraph.mdx (Arabic) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/ar/subgraphs/developing/publishing/publishing-a-subgraph.mdx index dca943ad3152..2bc0ec5f514c 100644 --- a/website/src/pages/ar/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/ar/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: Publishing a Subgraph to the Decentralized Network +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -When you publish a subgraph to the decentralized network, you make it available for: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -All published versions of an existing subgraph can: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. 
-### Updating metadata for a published subgraph +### Updating metadata for a published Subgraph -- After publishing your subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Once you’ve saved your changes and published the updates, they will appear in Graph Explorer. - It's important to note that this process will not create a new version since your deployment has not changed. ## Publishing from the CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Open the `graph-cli`. 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized subgraph to a network of your choice. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Customizing your deployment -You can upload your subgraph build to a specific IPFS node and further customize your deployment with the following flags: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Adding signal to your subgraph +## Adding signal to your Subgraph -Developers can add GRT signal to their subgraphs to incentivize Indexers to query the subgraph. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- If a subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Adding signal to a subgraph which is not eligible for rewards will not attract additional Indexers. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> If your subgraph is eligible for rewards, it is recommended that you curate your own subgraph with at least 3,000 GRT in order to attract additional indexers to index your subgraph. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. 
+The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio enables you to add signal to your subgraph by adding GRT to your subgraph's curation pool in the same transaction it is published. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Alternatively, you can add GRT signal to a published subgraph from Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. ![Signal from Explorer](/img/signal-from-explorer.png) From c595c0772d8dce3571e7598ece252a352766178e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:47 -0500 Subject: [PATCH 0938/1789] New translations publishing-a-subgraph.mdx (Czech) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/cs/subgraphs/developing/publishing/publishing-a-subgraph.mdx index ed8846e26498..29c75273aa17 100644 --- a/website/src/pages/cs/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/cs/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: Zveřejnění podgrafu v decentralizované síti +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -When you publish a subgraph to the decentralized network, you make it available for: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. 
Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -All published versions of an existing subgraph can: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. -### Aktualizace metadata publikovaného podgrafu +### Updating metadata for a published Subgraph -- After publishing your subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Once you’ve saved your changes and published the updates, they will appear in Graph Explorer. - It's important to note that this process will not create a new version since your deployment has not changed. ## Publishing from the CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Open the `graph-cli`. 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized subgraph to a network of your choice. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Customizing your deployment -You can upload your subgraph build to a specific IPFS node and further customize your deployment with the following flags: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Přidání signálu do podgrafu +## Adding signal to your Subgraph -Developers can add GRT signal to their subgraphs to incentivize Indexers to query the subgraph. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- If a subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Přidání signálu do podgrafu, který nemá nárok na odměny, nepřiláká další indexátory. 
+> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> If your subgraph is eligible for rewards, it is recommended that you curate your own subgraph with at least 3,000 GRT in order to attract additional indexers to index your subgraph. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio enables you to add signal to your subgraph by adding GRT to your subgraph's curation pool in the same transaction it is published. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Případně můžete přidat signál GRT do publikovaného podgrafu z Průzkumníka grafů. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. 
![Signal from Explorer](/img/signal-from-explorer.png) From 1a8a89ad46cd2d906a06becac9a295a0bf4d3e1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:48 -0500 Subject: [PATCH 0939/1789] New translations publishing-a-subgraph.mdx (German) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/de/subgraphs/developing/publishing/publishing-a-subgraph.mdx index 129d063a2e95..1f0810869c26 100644 --- a/website/src/pages/de/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/de/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: Veröffentlichung eines Subgraphen im dezentralen Netzwerk +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -Wenn Sie einen Subgraphen im dezentralen Netzwerk veröffentlichen, stellen Sie ihn für andere zur Verfügung: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -Alle veröffentlichten Versionen eines bestehenden Subgraphen können: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. -### Aktualisierung der Metadaten für einen veröffentlichten Subgraphen +### Updating metadata for a published Subgraph -- Nachdem Sie Ihren Subgraphen im dezentralen Netzwerk veröffentlicht haben, können Sie die Metadaten jederzeit in Subgraph Studio aktualisieren. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Sobald Sie Ihre Änderungen gespeichert und die Aktualisierungen veröffentlicht haben, werden sie im Graph Explorer angezeigt. - Es ist wichtig zu beachten, dass bei diesem Vorgang keine neue Version erstellt wird, da sich Ihre Bereitstellung nicht geändert hat. ## Veröffentlichen über die CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Öffnen Sie den `graph-cli`. 2. 
Use the following commands: `graph codegen && graph build` then `graph publish`. -3. Es öffnet sich ein Fenster, in dem Sie Ihre Wallet verbinden, Metadaten hinzufügen und Ihren fertigen Subgraphen in einem Netzwerk Ihrer Wahl bereitstellen können. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Anpassen Ihrer Bereitstellung -Sie können Ihre Subgraph-Erstellung auf einen bestimmten IPFS-Knoten hochladen und Ihre Bereitstellung mit den folgenden Flags weiter anpassen: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Hinzufügen von Signalen zu Ihrem Subgraphen +## Adding signal to your Subgraph -Entwickler können ihren Subgraphen ein GRT-Signal hinzufügen, um Indexer zur Abfrage des Subgraphen zu veranlassen. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- Wenn ein Subgraph für Indexing Rewards in Frage kommt, erhalten Indexer, die einen „Beweis für die Indizierung“ erbringen, einen GRT Reward, der sich nach der Menge der signalisierten GRT richtet. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Das Hinzufügen von Signalen zu einem Subgraphen, der nicht für Rewards in Frage kommt, zieht keine weiteren Indexer an. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> Wenn Ihr Subgraph für Rewards in Frage kommt, wird empfohlen, dass Sie Ihren eigenen Subgraphen mit mindestens 3.000 GRT kuratieren, um zusätzliche Indexer für die Indizierung Ihres Subgraphen zu gewinnen. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -Bei der Signalisierung können Kuratoren entscheiden, ob sie für eine bestimmte Version des Subgraphen signalisieren wollen oder ob sie die automatische Migration verwenden wollen. Bei der automatischen Migration werden die Freigaben eines Kurators immer auf die neueste vom Entwickler veröffentlichte Version aktualisiert. 
Wenn sie sich stattdessen für eine bestimmte Version entscheiden, bleiben die Freigaben immer auf dieser spezifischen Version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Indexer können Subgraphen für die Indizierung auf der Grundlage von Kurationssignalen finden, die sie im Graph Explorer sehen. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer-Subgrafen](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Mit Subgraph Studio können Sie Ihrem Subgraphen ein Signal hinzufügen, indem Sie GRT in der gleichen Transaktion, in der es veröffentlicht wird, zum Kurationspool Ihres Subgraphen hinzufügen. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Alternativ können Sie ein GRT-Signal zu einem veröffentlichten Subgraphen aus dem Graph Explorer hinzufügen. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. ![Signal provenant de l'Explorer](/img/signal-from-explorer.png) From 4c836ebcf40e7820a5a754e2d7d4c61bfefbf863 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:49 -0500 Subject: [PATCH 0940/1789] New translations publishing-a-subgraph.mdx (Italian) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/it/subgraphs/developing/publishing/publishing-a-subgraph.mdx index 8706691669d1..1672a6619d13 100644 --- a/website/src/pages/it/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/it/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: Pubblicare un subgraph nella rete decentralizzata +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -When you publish a subgraph to the decentralized network, you make it available for: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -All published versions of an existing subgraph can: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. 
[Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. -### Aggiornamento dei metadati per un subgraph pubblicato +### Updating metadata for a published Subgraph -- After publishing your subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Once you’ve saved your changes and published the updates, they will appear in Graph Explorer. - It's important to note that this process will not create a new version since your deployment has not changed. ## Publishing from the CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Open the `graph-cli`. 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized subgraph to a network of your choice. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Customizing your deployment -You can upload your subgraph build to a specific IPFS node and further customize your deployment with the following flags: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Adding signal to your subgraph +## Adding signal to your Subgraph -Developers can add GRT signal to their subgraphs to incentivize Indexers to query the subgraph. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- If a subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Adding signal to a subgraph which is not eligible for rewards will not attract additional Indexers. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> If your subgraph is eligible for rewards, it is recommended that you curate your own subgraph with at least 3,000 GRT in order to attract additional indexers to index your subgraph. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. 
-The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio enables you to add signal to your subgraph by adding GRT to your subgraph's curation pool in the same transaction it is published. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Alternatively, you can add GRT signal to a published subgraph from Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. ![Signal from Explorer](/img/signal-from-explorer.png) From 389700a59b72752efb13df2c291ec28bc5ba78e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:50 -0500 Subject: [PATCH 0941/1789] New translations publishing-a-subgraph.mdx (Japanese) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/ja/subgraphs/developing/publishing/publishing-a-subgraph.mdx index f9d92cf7d0d9..c26672ec6b84 100644 --- a/website/src/pages/ja/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/ja/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: 分散型ネットワークへのサブグラフの公開 +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. 
-When you publish a subgraph to the decentralized network, you make it available for: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -All published versions of an existing subgraph can: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. -### パブリッシュされたサブグラフのメタデータの更新 +### Updating metadata for a published Subgraph -- After publishing your subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Once you’ve saved your changes and published the updates, they will appear in Graph Explorer. - It's important to note that this process will not create a new version since your deployment has not changed. ## Publishing from the CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Open the `graph-cli`. 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized subgraph to a network of your choice. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Customizing your deployment -You can upload your subgraph build to a specific IPFS node and further customize your deployment with the following flags: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Adding signal to your subgraph +## Adding signal to your Subgraph -Developers can add GRT signal to their subgraphs to incentivize Indexers to query the subgraph. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- If a subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). 
+- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Adding signal to a subgraph which is not eligible for rewards will not attract additional Indexers. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> If your subgraph is eligible for rewards, it is recommended that you curate your own subgraph with at least 3,000 GRT in order to attract additional indexers to index your subgraph. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio enables you to add signal to your subgraph by adding GRT to your subgraph's curation pool in the same transaction it is published. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Alternatively, you can add GRT signal to a published subgraph from Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. 
![Signal from Explorer](/img/signal-from-explorer.png) From 40d1f905183c3f59d0fffca1b0630f79bfb8c985 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:51 -0500 Subject: [PATCH 0942/1789] New translations publishing-a-subgraph.mdx (Korean) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/ko/subgraphs/developing/publishing/publishing-a-subgraph.mdx index dca943ad3152..2bc0ec5f514c 100644 --- a/website/src/pages/ko/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/ko/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: Publishing a Subgraph to the Decentralized Network +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -When you publish a subgraph to the decentralized network, you make it available for: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -All published versions of an existing subgraph can: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. -### Updating metadata for a published subgraph +### Updating metadata for a published Subgraph -- After publishing your subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Once you’ve saved your changes and published the updates, they will appear in Graph Explorer. - It's important to note that this process will not create a new version since your deployment has not changed. ## Publishing from the CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Open the `graph-cli`. 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. 
A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized subgraph to a network of your choice. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Customizing your deployment -You can upload your subgraph build to a specific IPFS node and further customize your deployment with the following flags: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Adding signal to your subgraph +## Adding signal to your Subgraph -Developers can add GRT signal to their subgraphs to incentivize Indexers to query the subgraph. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- If a subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Adding signal to a subgraph which is not eligible for rewards will not attract additional Indexers. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> If your subgraph is eligible for rewards, it is recommended that you curate your own subgraph with at least 3,000 GRT in order to attract additional indexers to index your subgraph. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. 
If they decide to signal on a specific version instead, shares will always stay on this specific version. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio enables you to add signal to your subgraph by adding GRT to your subgraph's curation pool in the same transaction it is published. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Alternatively, you can add GRT signal to a published subgraph from Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. ![Signal from Explorer](/img/signal-from-explorer.png) From c28d85aa6495d8f64cce7231b62ceb8c9985e012 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:52 -0500 Subject: [PATCH 0943/1789] New translations publishing-a-subgraph.mdx (Dutch) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/nl/subgraphs/developing/publishing/publishing-a-subgraph.mdx index dca943ad3152..2bc0ec5f514c 100644 --- a/website/src/pages/nl/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/nl/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: Publishing a Subgraph to the Decentralized Network +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -When you publish a subgraph to the decentralized network, you make it available for: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -All published versions of an existing subgraph can: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. 
-### Updating metadata for a published subgraph +### Updating metadata for a published Subgraph -- After publishing your subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Once you’ve saved your changes and published the updates, they will appear in Graph Explorer. - It's important to note that this process will not create a new version since your deployment has not changed. ## Publishing from the CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Open the `graph-cli`. 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized subgraph to a network of your choice. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Customizing your deployment -You can upload your subgraph build to a specific IPFS node and further customize your deployment with the following flags: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Adding signal to your subgraph +## Adding signal to your Subgraph -Developers can add GRT signal to their subgraphs to incentivize Indexers to query the subgraph. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- If a subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Adding signal to a subgraph which is not eligible for rewards will not attract additional Indexers. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> If your subgraph is eligible for rewards, it is recommended that you curate your own subgraph with at least 3,000 GRT in order to attract additional indexers to index your subgraph. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. 
+The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio enables you to add signal to your subgraph by adding GRT to your subgraph's curation pool in the same transaction it is published. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Alternatively, you can add GRT signal to a published subgraph from Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. ![Signal from Explorer](/img/signal-from-explorer.png) From a95bec58cbc36a545f235c80309c7d3b45fd28b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:53 -0500 Subject: [PATCH 0944/1789] New translations publishing-a-subgraph.mdx (Polish) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/pl/subgraphs/developing/publishing/publishing-a-subgraph.mdx index dca943ad3152..2bc0ec5f514c 100644 --- a/website/src/pages/pl/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/pl/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: Publishing a Subgraph to the Decentralized Network +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -When you publish a subgraph to the decentralized network, you make it available for: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. 
Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -All published versions of an existing subgraph can: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. -### Updating metadata for a published subgraph +### Updating metadata for a published Subgraph -- After publishing your subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Once you’ve saved your changes and published the updates, they will appear in Graph Explorer. - It's important to note that this process will not create a new version since your deployment has not changed. ## Publishing from the CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Open the `graph-cli`. 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized subgraph to a network of your choice. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Customizing your deployment -You can upload your subgraph build to a specific IPFS node and further customize your deployment with the following flags: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Adding signal to your subgraph +## Adding signal to your Subgraph -Developers can add GRT signal to their subgraphs to incentivize Indexers to query the subgraph. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- If a subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Adding signal to a subgraph which is not eligible for rewards will not attract additional Indexers. 
+> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> If your subgraph is eligible for rewards, it is recommended that you curate your own subgraph with at least 3,000 GRT in order to attract additional indexers to index your subgraph. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio enables you to add signal to your subgraph by adding GRT to your subgraph's curation pool in the same transaction it is published. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Alternatively, you can add GRT signal to a published subgraph from Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. 
![Signal from Explorer](/img/signal-from-explorer.png) From 669cc3eafe554b587fce04be457dc33320dba0e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:54 -0500 Subject: [PATCH 0945/1789] New translations publishing-a-subgraph.mdx (Portuguese) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/pt/subgraphs/developing/publishing/publishing-a-subgraph.mdx index ad08b1c68cf8..9c6ab402e3a5 100644 --- a/website/src/pages/pt/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/pt/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: Como Editar um Subgraph na Rede Descentralizada +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -Ao editar um subgraph à rede descentralizada, ele será disponibilizado para: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -Todas as versões editadas de um subgraph existente podem: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. -### Como atualizar metadados para um subgraph editado +### Updating metadata for a published Subgraph -- Após editar o seu subgraph à rede descentralizada, será possível editar os metadados a qualquer hora no Subgraph Studio. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Após salvar as suas mudanças e publicar as atualizações, elas aparecerão no Graph Explorer. - É importante notar que este processo não criará uma nova versão, já que a sua edição não terá mudado. ## Publicação da CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Open the `graph-cli`. 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. 
Uma janela será aberta para o programador conectar a sua carteira, adicionar metadados e lançar o seu subgraph finalizado a uma rede de sua escolha. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Como personalizar o seu lançamento -É possível enviar a sua build a um node IPFS específico e personalizar ainda mais o seu lançamento com as seguintes flags: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Como adicionar sinal ao seu subgraph +## Adding signal to your Subgraph -Programadores podem adicionar sinal de GRT aos seus subgraphs para incentivar Indexadores a consultarem o subgraph. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- Se um subgraph for elegível para recompensas de indexação, Indexadores que providenciarem uma "prova de indexação" receberão uma recompensa em GRT com base na quantidade de GRT sinalizada. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Adicionar sinais a um subgraph que não for elegível não atrairá mais Indexadores. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> Se o seu subgraph for elegível a recompensas, recomendamos que cure o seu próprio subgraph com, no mínimo, 3000 GRT para atrair mais Indexadores ao seu subgraph. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -Ao sinalizar, Curadores podem decidir entre sinalizar numa versão específica do subgraph ou sinalizar com a automigração. Caso sinalizem com a automigração, as ações de um curador sempre serão atualizadas à versão mais recente publicada pelo programador. Se decidirem sinalizar numa versão específica, as ações sempre permanecerão nesta versão específica. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. 
If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Os indexadores podem achar subgraphs para indexar com base em sinais de curadoria que veem no Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Subgraphs do Explorer](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -O Subgraph Studio lhe permite adicionar sinais ao seu subgraph ao adicionar GRT ao pool de curadoria na mesma transação em que são publicados. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Como alternativa, é possível adicionar sinais em GRT a um subgraph editado a partir do Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. ![Signal from Explorer](/img/signal-from-explorer.png) From 30d2c77bc3bd291960bcc8b6a38a742fe79aad52 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:55 -0500 Subject: [PATCH 0946/1789] New translations publishing-a-subgraph.mdx (Russian) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/ru/subgraphs/developing/publishing/publishing-a-subgraph.mdx index bf789c87b2b0..812ee8576830 100644 --- a/website/src/pages/ru/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/ru/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: Публикация подграфа в децентрализованной сети +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -Публикуя субграф в децентрализованной сети, Вы делаете его доступным для: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -Все опубликованные версии существующего субграфа могут: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. 
-### Обновление метаданных опубликованного субграфа +### Updating metadata for a published Subgraph -- После публикации своего субграфа в децентрализованной сети Вы можете в любое время обновить метаданные в Subgraph Studio. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - После сохранения изменений и публикации обновлений они появятся в Graph Explorer. - Важно отметить, что этот процесс не приведет к созданию новой версии, поскольку Ваше развертывание не изменилось. ## Публикация с помощью CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Откройте `graph-cli`. 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. Откроется окно, где Вы сможете подключить свой кошелек, добавить метаданные и развернуть финализированный субграф в выбранной Вами сети. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Настройка Вашего развертывания -Вы можете загрузить сборку своего субграфа на конкретную ноду IPFS и дополнительно настроить развертывание с помощью следующих флагов: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Добавление сигнала к Вашему субграфу +## Adding signal to your Subgraph -Разработчики могут добавлять сигнал GRT в свои субграфы, чтобы стимулировать Индексаторов запрашивать субграф. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- Если субграф имеет право на вознаграждение за индексирование, Индексаторы, предоставившие «доказательство индексирования», получат вознаграждение GRT в зависимости от заявленной суммы GRT. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Добавление сигнала в субграф, который не имеет права на получение вознаграждения, не привлечет дополнительных Индексаторов. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> Если Ваш субграф имеет право на получение вознаграждения, рекомендуется курировать собственный субграф, добавив как минимум 3,000 GRT, чтобы привлечь дополнительных Индексаторов для индексирования Вашего субграфа. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. 
This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -При подаче сигнала Кураторы могут решить подать сигнал на определенную версию субграфа или использовать автомиграцию. Если они подают сигнал с помощью автомиграции, доли куратора всегда будут обновляться до последней версии, опубликованной разработчиком. Если же они решат подать сигнал на определенную версию, доли всегда будут оставаться на этой конкретной версии. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Индексаторы могут находить субграфы для индексирования на основе сигналов курирования, которые они видят в Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio позволяет Вам добавлять сигнал в Ваш субграф, добавляя GRT в пул курирования Вашего субграфа в той же транзакции, в которой он публикуется. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Кроме того, Вы можете добавить сигнал GRT к опубликованному субграфу из Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. ![Signal from Explorer](/img/signal-from-explorer.png) From 4557be15123f11eacd4f5b35f2428ec1c2581220 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:56 -0500 Subject: [PATCH 0947/1789] New translations publishing-a-subgraph.mdx (Swedish) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/sv/subgraphs/developing/publishing/publishing-a-subgraph.mdx index 24079d30b9b4..e13f4a7f9f7c 100644 --- a/website/src/pages/sv/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/sv/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: Publicera en Subgraph på Det Decentraliserade Nätverket +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. 
-When you publish a subgraph to the decentralized network, you make it available for: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -All published versions of an existing subgraph can: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. -### Uppdatera metadata för en publicerad subgraph +### Updating metadata for a published Subgraph -- After publishing your subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Once you’ve saved your changes and published the updates, they will appear in Graph Explorer. - It's important to note that this process will not create a new version since your deployment has not changed. ## Publishing from the CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Open the `graph-cli`. 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized subgraph to a network of your choice. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Customizing your deployment -You can upload your subgraph build to a specific IPFS node and further customize your deployment with the following flags: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Adding signal to your subgraph +## Adding signal to your Subgraph -Developers can add GRT signal to their subgraphs to incentivize Indexers to query the subgraph. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- If a subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). 
+- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Adding signal to a subgraph which is not eligible for rewards will not attract additional Indexers. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> If your subgraph is eligible for rewards, it is recommended that you curate your own subgraph with at least 3,000 GRT in order to attract additional indexers to index your subgraph. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio enables you to add signal to your subgraph by adding GRT to your subgraph's curation pool in the same transaction it is published. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Alternatively, you can add GRT signal to a published subgraph from Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. 
![Signal from Explorer](/img/signal-from-explorer.png) From 613ee672540ef49f2c921892f84cc4114521d280 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:57 -0500 Subject: [PATCH 0948/1789] New translations publishing-a-subgraph.mdx (Turkish) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/tr/subgraphs/developing/publishing/publishing-a-subgraph.mdx index 861f9e6a49f4..09d0a69371b2 100644 --- a/website/src/pages/tr/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/tr/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: Bir Subgraph'i Merkeziyetsiz Ağda Yayımlamak +sidebarTitle: Publishing to the Decentralized Network --- -[Subgraph'inizi Subgraph Studio'ya dağıttıktan](/deploying/deploying-a-subgraph-to-studio/) ve üretime hazır hale getirdikten sonra, merkeziyetsiz ağda yayımlayabilirsiniz. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -Bir subgraph'i merkeziyetsiz ağda yayımladığınızda, onu şu amaçlarla kullanılabilir hale getirirsiniz: +When you publish a Subgraph to the decentralized network, you make it available for: - [Küratörler](/resources/roles/curating/) tarafından kürasyona başlanması. - [Endeksleyiciler](/indexing/overview/) tarafından endekslenmeye başlanması. @@ -17,33 +18,33 @@ Bir subgraph'i merkeziyetsiz ağda yayımladığınızda, onu şu amaçlarla kul 1. [Subgraph Studio](https://thegraph.com/studio/) paneline gidin 2. **Publish** düğmesine tıklayın -3. Subgraph'iniz artık [Graph Gezgini](https://thegraph.com/explorer/) içinde görünür olacak. +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -Mevcut bir subgraph'in yayımlanmış tüm sürümleri şunları yapabilir: +All published versions of an existing Subgraph can: - Arbitrum One'da yayımlanabilir. [The Graph Ağı'nın Arbitrum üzerindeki durumu hakkında daha fazla bilgi edinin](/archived/arbitrum/arbitrum-faq/). -- Subgraph'in yayımlandığı ağdan bağımsız olarak, [desteklenen ağlar](/supported-networks/) üzerindeki herhangi bir ağda veri endeksleyebilir. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. -### Yayınlanan bir subgraph için üst veri güncelleme +### Updating metadata for a published Subgraph -- Merkeziyetsiz ağda subgraph'inizi yayımladıktan sonra, Subgraph Studio'da metaveriyi istediğiniz zaman güncelleyebilirsiniz. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Yaptığınız değişiklikleri kaydedip güncellemeleri yayımladığınızda, bu güncellemeler Graph Gezgini'nde görünecektir. - Dağıtımınız değişmediği için bu işlemin yeni bir sürüm oluşturmayacağını unutmamak önemlidir. ## CLI'den Yayımlama -0.73.0 sürümünden itibaren subgraph'inizi [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) ile de yayımlayabilirsiniz. +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. `graph-cli`yi açın. 2. 
Aşağıdaki komutları kullanın: `graph codegen && graph build` ardından `graph publish`. -3. Bir pencere açılır ve cüzdanınızı bağlamanıza, metaveri eklemenize ve tamamlanmış subgraph'inizi tercih ettiğiniz bir ağa dağıtmanıza olanak tanır. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Dağıtımınızı özelleştirme -Aşağıdaki bayraklarla subgraph derlemenizi belirli bir IPFS düğümüne yükleyebilir ve dağıtımınızı daha fazla özelleştirebilirsiniz: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` KULLANIM @@ -61,33 +62,33 @@ BAYRAKLAR ``` -## Subgraph'inize sinyal ekleme +## Adding signal to your Subgraph -Geliştiriciler, Endeksleyicileri bir subgraph'i sorgulamaya teşvik etmek için subgraph'lerine GRT sinyali ekleyebilirler. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- Bir subgraph endeksleme ödüllerine uygun ise, "endeksleme ispatı" sağlayan Endeksleyiciler, sinyallenen GRT miktarına bağlı olarak GRT ödülü alır. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- Subgraph'inizin endeksleme ödüllerine uygunluğunu (bu, subgraph özellik kullanımına bağlıdır) [buradan](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) kontrol edebilirsiniz. +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Desteklenen spesifik ağları [buradan](/supported-networks/) inceleyebilirsiniz. -> Eğer bir subgraph ödüllere uygun değilse, bu subgraph'e sinyal eklemek ek Endeksleyicileri çekmeyecektir. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> Subgraph'iniz ödüllere uygunsa, subgraph'inizi en az 3.000 GRT ile küratörlüğünü yapmanız, ek Endeksleyicilerin subgraph'inizi endekslemesini sağlamak için önerilir. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -[Sunrise Yükseltmesi Endeksleyicisi](/archived/sunrise/#what-is-the-upgrade-indexer), tüm subgraph'lerin endekslenmesini sağlar. Ancak, belirli bir subgraph'e GRT sinyali eklemek, daha fazla Endeksleyiciyi bu subgraph'e çekecektir. Küratörlük yoluyla ek Endeksleyicilerin teşvik edilmesi, sorgular için hizmet kalitesini artırmayı, gecikmeyi azaltmayı ve ağ kullanılabilirliğini iyileştirmeyi amaçlar. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -Sinyal verirken, Küratörler belirli bir subgraph sürümüne sinyal vermeyi veya otomatik geçiş (auto-migrate) özelliğini kullanmayı seçebilirler. Eğer otomatik geçiş özelliğini kullanarak sinyal verirlerse, bir küratörün payları her zaman geliştirici tarafından yayımlanan en son sürüme göre güncellenir. Bunun yerine belirli bir sürüme sinyal vermeyi seçerlerse, paylar her zaman bu belirli sürümdeki haliyle kalır. 
+When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Endeksleyiciler, Graph Gezgini'nde gördükleri küratörlük sinyallerine göre endeksleyecekleri subgraph'leri bulabilirler. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Gezgin subgraph'leri](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio; subgraph'inizi yayımladığınız işlemde, subgraph'inizin küratörlük havuzuna GRT ekleyerek subgraph'inize sinyal eklemenize olanak tanır. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Kürasyon Havuzu](/img/curate-own-subgraph-tx.png) -Alternatif olarak, yayımlanmış bir subgraph'e Graph Gezgini üzerinden GRT sinyali ekleyebilirsiniz. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. ![Gezgin'den sinyal ekleme](/img/signal-from-explorer.png) From bfd50f1e040690e9e64a468a52a41e0815d05c1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:58 -0500 Subject: [PATCH 0949/1789] New translations publishing-a-subgraph.mdx (Ukrainian) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/uk/subgraphs/developing/publishing/publishing-a-subgraph.mdx index dca943ad3152..2bc0ec5f514c 100644 --- a/website/src/pages/uk/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/uk/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: Publishing a Subgraph to the Decentralized Network +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -When you publish a subgraph to the decentralized network, you make it available for: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -All published versions of an existing subgraph can: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. 
+- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. -### Updating metadata for a published subgraph +### Updating metadata for a published Subgraph -- After publishing your subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Once you’ve saved your changes and published the updates, they will appear in Graph Explorer. - It's important to note that this process will not create a new version since your deployment has not changed. ## Publishing from the CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Open the `graph-cli`. 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized subgraph to a network of your choice. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Customizing your deployment -You can upload your subgraph build to a specific IPFS node and further customize your deployment with the following flags: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Adding signal to your subgraph +## Adding signal to your Subgraph -Developers can add GRT signal to their subgraphs to incentivize Indexers to query the subgraph. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- If a subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Adding signal to a subgraph which is not eligible for rewards will not attract additional Indexers. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> If your subgraph is eligible for rewards, it is recommended that you curate your own subgraph with at least 3,000 GRT in order to attract additional indexers to index your subgraph. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. 
This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio enables you to add signal to your subgraph by adding GRT to your subgraph's curation pool in the same transaction it is published. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Alternatively, you can add GRT signal to a published subgraph from Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. ![Signal from Explorer](/img/signal-from-explorer.png) From 96df5f286250f411734343ab3e9c8fdc9a610ae8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:24:59 -0500 Subject: [PATCH 0950/1789] New translations publishing-a-subgraph.mdx (Chinese Simplified) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/zh/subgraphs/developing/publishing/publishing-a-subgraph.mdx index e1d2731b4617..1b5abd13c3ac 100644 --- a/website/src/pages/zh/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/zh/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: 向去中心化的网络发布子图 +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -When you publish a subgraph to the decentralized network, you make it available for: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. 
- [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -All published versions of an existing subgraph can: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. -### 更新已发布的子图的元数据 +### Updating metadata for a published Subgraph -- After publishing your subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Once you’ve saved your changes and published the updates, they will appear in Graph Explorer. - It's important to note that this process will not create a new version since your deployment has not changed. ## Publishing from the CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Open the `graph-cli`. 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized subgraph to a network of your choice. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Customizing your deployment -You can upload your subgraph build to a specific IPFS node and further customize your deployment with the following flags: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Adding signal to your subgraph +## Adding signal to your Subgraph -Developers can add GRT signal to their subgraphs to incentivize Indexers to query the subgraph. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- If a subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). 
-> Adding signal to a subgraph which is not eligible for rewards will not attract additional Indexers. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> If your subgraph is eligible for rewards, it is recommended that you curate your own subgraph with at least 3,000 GRT in order to attract additional indexers to index your subgraph. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio enables you to add signal to your subgraph by adding GRT to your subgraph's curation pool in the same transaction it is published. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Alternatively, you can add GRT signal to a published subgraph from Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. 
![Signal from Explorer](/img/signal-from-explorer.png) From d14f4066c848ed849a64433c71d257062dbd0718 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:25:00 -0500 Subject: [PATCH 0951/1789] New translations publishing-a-subgraph.mdx (Urdu (Pakistan)) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/ur/subgraphs/developing/publishing/publishing-a-subgraph.mdx index 36ce4650b242..0029f0a41559 100644 --- a/website/src/pages/ur/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/ur/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: ڈیسینٹرلائزڈ نیٹ ورک پر سب گراف شائع کرنا +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -When you publish a subgraph to the decentralized network, you make it available for: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -All published versions of an existing subgraph can: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. -### شائع شدہ سب گراف کے لیے میٹا ڈیٹا کو اپ ڈیٹ کرنا +### Updating metadata for a published Subgraph -- After publishing your subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Once you’ve saved your changes and published the updates, they will appear in Graph Explorer. - It's important to note that this process will not create a new version since your deployment has not changed. ## Publishing from the CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Open the `graph-cli`. 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. 
A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized subgraph to a network of your choice. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Customizing your deployment -You can upload your subgraph build to a specific IPFS node and further customize your deployment with the following flags: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Adding signal to your subgraph +## Adding signal to your Subgraph -Developers can add GRT signal to their subgraphs to incentivize Indexers to query the subgraph. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- If a subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Adding signal to a subgraph which is not eligible for rewards will not attract additional Indexers. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> If your subgraph is eligible for rewards, it is recommended that you curate your own subgraph with at least 3,000 GRT in order to attract additional indexers to index your subgraph. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. 
If they decide to signal on a specific version instead, shares will always stay on this specific version. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio enables you to add signal to your subgraph by adding GRT to your subgraph's curation pool in the same transaction it is published. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Alternatively, you can add GRT signal to a published subgraph from Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. ![Signal from Explorer](/img/signal-from-explorer.png) From b361d9e47d269362e0cc41e5af7f00517c7cd0f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:25:01 -0500 Subject: [PATCH 0952/1789] New translations publishing-a-subgraph.mdx (Vietnamese) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/vi/subgraphs/developing/publishing/publishing-a-subgraph.mdx index dca943ad3152..2bc0ec5f514c 100644 --- a/website/src/pages/vi/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/vi/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: Publishing a Subgraph to the Decentralized Network +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -When you publish a subgraph to the decentralized network, you make it available for: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -All published versions of an existing subgraph can: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. 
-### Updating metadata for a published subgraph +### Updating metadata for a published Subgraph -- After publishing your subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Once you’ve saved your changes and published the updates, they will appear in Graph Explorer. - It's important to note that this process will not create a new version since your deployment has not changed. ## Publishing from the CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Open the `graph-cli`. 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized subgraph to a network of your choice. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Customizing your deployment -You can upload your subgraph build to a specific IPFS node and further customize your deployment with the following flags: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Adding signal to your subgraph +## Adding signal to your Subgraph -Developers can add GRT signal to their subgraphs to incentivize Indexers to query the subgraph. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- If a subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Adding signal to a subgraph which is not eligible for rewards will not attract additional Indexers. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> If your subgraph is eligible for rewards, it is recommended that you curate your own subgraph with at least 3,000 GRT in order to attract additional indexers to index your subgraph. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. 
+The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio enables you to add signal to your subgraph by adding GRT to your subgraph's curation pool in the same transaction it is published. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Alternatively, you can add GRT signal to a published subgraph from Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. ![Signal from Explorer](/img/signal-from-explorer.png) From 93789a4aae83c93a90d69c4a2215c2c0a68ec589 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:25:02 -0500 Subject: [PATCH 0953/1789] New translations publishing-a-subgraph.mdx (Marathi) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/mr/subgraphs/developing/publishing/publishing-a-subgraph.mdx index 50c8077f371a..78b641e5ae0a 100644 --- a/website/src/pages/mr/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/mr/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: विकेंद्रीकृत नेटवर्कवर सबग्राफ प्रकाशित करणे +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -When you publish a subgraph to the decentralized network, you make it available for: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. 
Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -All published versions of an existing subgraph can: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. -### प्रकाशित सबग्राफसाठी मेटाडेटा अपडेट करत आहे +### Updating metadata for a published Subgraph -- After publishing your subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - Once you’ve saved your changes and published the updates, they will appear in Graph Explorer. - It's important to note that this process will not create a new version since your deployment has not changed. ## Publishing from the CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. Open the `graph-cli`. 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized subgraph to a network of your choice. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### Customizing your deployment -You can upload your subgraph build to a specific IPFS node and further customize your deployment with the following flags: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Adding signal to your subgraph +## Adding signal to your Subgraph -Developers can add GRT signal to their subgraphs to incentivize Indexers to query the subgraph. +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- If a subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Adding signal to a subgraph which is not eligible for rewards will not attract additional Indexers. 
+> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> If your subgraph is eligible for rewards, it is recommended that you curate your own subgraph with at least 3,000 GRT in order to attract additional indexers to index your subgraph. +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio enables you to add signal to your subgraph by adding GRT to your subgraph's curation pool in the same transaction it is published. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Alternatively, you can add GRT signal to a published subgraph from Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. 
![Signal from Explorer](/img/signal-from-explorer.png) From 95a43ee873b209f3132e3f0199fd0636099a5aa5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:25:03 -0500 Subject: [PATCH 0954/1789] New translations publishing-a-subgraph.mdx (Hindi) --- .../publishing/publishing-a-subgraph.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/hi/subgraphs/developing/publishing/publishing-a-subgraph.mdx index 51a773bb8012..ac80b5d2dde3 100644 --- a/website/src/pages/hi/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/hi/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -1,10 +1,11 @@ --- title: विकेंद्रीकृत नेटवर्क के लिए एक सबग्राफ प्रकाशित करना +sidebarTitle: Publishing to the Decentralized Network --- -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. -जब आप एक subgraph को विकेंद्रीकृत नेटवर्क पर प्रकाशित करते हैं, तो आप इसे उपलब्ध कराते हैं: +When you publish a Subgraph to the decentralized network, you make it available for: - [Curators](/resources/roles/curating/) to begin curating it. - [Indexers](/indexing/overview/) to begin indexing it. @@ -17,33 +18,33 @@ Check out the list of [supported networks](/supported-networks/). 1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard 2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). -एक मौजूदा subgraph के सभी प्रकाशित संस्करण कर सकते हैं: +All published versions of an existing Subgraph can: - Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the subgraph was published. +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. -### प्रकाशित सबग्राफ के लिए मेटाडेटा अपडेट करना +### Updating metadata for a published Subgraph -- अपने सबग्राफ को विकेंद्रीकृत नेटवर्क पर प्रकाशित करने के बाद, आप Subgraph Studio में किसी भी समय मेटाडेटा को अपडेट कर सकते हैं। +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. - एक बार जब आप अपने परिवर्तनों को सहेज लेते हैं और अपडेट प्रकाशित कर देते हैं, तो वे Graph Explorer में दिखाई देंगे। - यह ध्यान रखना महत्वपूर्ण है कि इस प्रक्रिया से कोई नया संस्करण नहीं बनेगा क्योंकि आपका डिप्लॉयमेंट नहीं बदला है। ## Publishing from the CLI -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 1. `graph-cli` खोलें। 2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. 
एक विंडो खुलेगी, जो आपको अपनी वॉलेट कनेक्ट करने, मेटाडेटा जोड़ने, और अपने अंतिम Subgraph को आपकी पसंद के नेटवर्क पर डिप्लॉय करने की अनुमति देगी। +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. ![cli-ui](/img/cli-ui.png) ### अपने डिप्लॉयमेंट को अनुकूलित करना -आप अपने Subgraph बिल्ड को एक विशेष IPFSनोड पर अपलोड कर सकते हैं और निम्नलिखित फ्लैग्स के साथ अपने डिप्लॉयमेंट को और अनुकूलित कर सकते हैं: +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: ``` USAGE @@ -61,33 +62,33 @@ FLAGS ``` -## Adding signal to your subgraph +## Adding signal to your Subgraph -डेवलपर्स अपने Subgraph में GRT सिग्नल जोड़ सकते हैं ताकि Indexer को Subgraph पर क्वेरी करने के लिए प्रेरित किया जा सके। +Developers can add GRT signal to their Subgraphs to incentivize Indexers to query the Subgraph. -- यदि कोई Subgraph इंडेक्सिंग पुरस्कारों के लिए पात्र है, तो जो Indexer "इंडेक्सिंग का प्रमाण" प्रदान करते हैं, उन्हें संकेतित GRTकी मात्रा के आधार पर GRT पुरस्कार मिलेगा। +- If a Subgraph is eligible for indexing rewards, Indexers who provide a "proof of indexing" will receive a GRT reward, based on the amount of GRT signalled. -- You can check indexing reward eligibility based on subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +- You can check indexing reward eligibility based on Subgraph feature usage [here](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - Specific supported networks can be checked [here](/supported-networks/). -> Adding signal to a subgraph which is not eligible for rewards will not attract additional Indexers. +> Adding signal to a Subgraph which is not eligible for rewards will not attract additional Indexers. > -> यदि आपका Subgraph पुरस्कारों के लिए पात्र है, तो यह अनुशंसा की जाती है कि आप अपने Subgraph को कम से कम 3,000 GRT के साथ क्यूरेट करें ताकि अधिक Indexer को आपके सबग्राफ़ को इंडेक्स करने के लिए आकर्षित किया जा सके। +> If your Subgraph is eligible for rewards, it is recommended that you curate your own Subgraph with at least 3,000 GRT in order to attract additional indexers to index your Subgraph. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all Subgraphs. However, signaling GRT on a particular Subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +When signaling, Curators can decide to signal on a specific version of the Subgraph or to signal using auto-migrate. 
If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer. +Indexers can find Subgraphs to index based on curation signals they see in Graph Explorer. -![Explorer सबग्राफ](/img/explorer-subgraphs.png) +![Explorer Subgraphs](/img/explorer-subgraphs.png) -Subgraph Studio आपको अपने सबग्राफ़ में सिग्नल जोड़ने की सुविधा देता है, जिसमें आप अपने सबग्राफ़ के क्यूरेशन पूल में उसी लेन-देन के साथ GRT जोड़ सकते हैं, जब इसे प्रकाशित किया जाता है. +Subgraph Studio enables you to add signal to your Subgraph by adding GRT to your Subgraph's curation pool in the same transaction it is published. ![Curation Pool](/img/curate-own-subgraph-tx.png) -Alternatively, you can add GRT signal to a published subgraph from Graph Explorer. +Alternatively, you can add GRT signal to a published Subgraph from Graph Explorer. ![Signal from Explorer](/img/signal-from-explorer.png) From 59d8cfdaf7afd6f36d833ac1d28b8a4be032142d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Tue, 25 Feb 2025 17:25:04 -0500 Subject: [PATCH 0955/1789] New translations publishing-a-subgraph.mdx (Swahili) --- .../publishing/publishing-a-subgraph.mdx | 95 +++++++++++++++++++ 1 file changed, 95 insertions(+) create mode 100644 website/src/pages/sw/subgraphs/developing/publishing/publishing-a-subgraph.mdx diff --git a/website/src/pages/sw/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/sw/subgraphs/developing/publishing/publishing-a-subgraph.mdx new file mode 100644 index 000000000000..2bc0ec5f514c --- /dev/null +++ b/website/src/pages/sw/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -0,0 +1,95 @@ +--- +title: Publishing a Subgraph to the Decentralized Network +sidebarTitle: Publishing to the Decentralized Network +--- + +Once you have [deployed your Subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) and it's ready to go into production, you can publish it to the decentralized network. + +When you publish a Subgraph to the decentralized network, you make it available for: + +- [Curators](/resources/roles/curating/) to begin curating it. +- [Indexers](/indexing/overview/) to begin indexing it. + + + +Check out the list of [supported networks](/supported-networks/). + +## Publishing from Subgraph Studio + +1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard +2. Click on the **Publish** button +3. Your Subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). + +All published versions of an existing Subgraph can: + +- Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/archived/arbitrum/arbitrum-faq/). + +- Index data on any of the [supported networks](/supported-networks/), regardless of the network on which the Subgraph was published. + +### Updating metadata for a published Subgraph + +- After publishing your Subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. +- Once you’ve saved your changes and published the updates, they will appear in Graph Explorer. +- It's important to note that this process will not create a new version since your deployment has not changed. 
+ +## Publishing from the CLI + +As of version 0.73.0, you can also publish your Subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). + +1. Open the `graph-cli`. +2. Use the following commands: `graph codegen && graph build` then `graph publish`. +3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized Subgraph to a network of your choice. + +![cli-ui](/img/cli-ui.png) + +### Customizing your deployment + +You can upload your Subgraph build to a specific IPFS node and further customize your deployment with the following flags: + +``` +USAGE + $ graph publish [SUBGRAPH-MANIFEST] [-h] [--protocol-network arbitrum-one|arbitrum-sepolia --subgraph-id ] [-i ] [--ipfs-hash ] [--webapp-url + ] + +FLAGS + -h, --help Show CLI help. + -i, --ipfs= [default: https://api.thegraph.com/ipfs/api/v0] Upload build results to an IPFS node. + --ipfs-hash= IPFS hash of the subgraph manifest to deploy. + --protocol-network=